/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>	/* for definition of NULL */
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <libkern/OSAtomic.h>

#include <machine/endian.h>

#define _IP_VHL
#include <net/if_var.h>
#include <net/route.h>
#include <net/kpi_protocol.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/kpi_ipfilter_var.h>


/*
 * kipf_lock and kipf_ref protect the linkage of the list of IP filters.
 * An IP filter can be removed only when kipf_ref is zero.
 * If an IP filter cannot be removed because kipf_ref is not zero, the
 * filter is marked for deferred removal and kipf_delayed_remove is set,
 * so that when kipf_ref eventually drops back to zero the filter is removed.
 */
decl_lck_mtx_data(static, kipf_lock_data);
static lck_mtx_t *kipf_lock = &kipf_lock_data;
static u_int32_t kipf_ref = 0;
static u_int32_t kipf_delayed_remove = 0;
u_int32_t kipf_count = 0;

__private_extern__ struct ipfilter_list	ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters);
__private_extern__ struct ipfilter_list	ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters);
__private_extern__ struct ipfilter_list	tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters);

__private_extern__ void
ipf_ref(void)
{
	lck_mtx_lock(kipf_lock);
	kipf_ref++;
	lck_mtx_unlock(kipf_lock);
}

__private_extern__ void
ipf_unref(void)
{
	lck_mtx_lock(kipf_lock);

	if (kipf_ref == 0)
		panic("ipf_unref: kipf_ref == 0\n");

	kipf_ref--;
	if (kipf_ref == 0 && kipf_delayed_remove != 0) {
		struct ipfilter *filter;

		while ((filter = TAILQ_FIRST(&tbr_filters))) {
			ipf_detach_func ipf_detach = filter->ipf_filter.ipf_detach;
			void* cookie = filter->ipf_filter.cookie;

			TAILQ_REMOVE(filter->ipf_head, filter, ipf_link);
			TAILQ_REMOVE(&tbr_filters, filter, ipf_tbr);
			kipf_delayed_remove--;

			if (ipf_detach) {
				lck_mtx_unlock(kipf_lock);
				ipf_detach(cookie);
				lck_mtx_lock(kipf_lock);
				/* In case some filter got to run while we released the lock */
				if (kipf_ref != 0)
					break;
			}
		}
	}
	lck_mtx_unlock(kipf_lock);
}
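
/*
 * Illustrative sketch (names are hypothetical, callback signature assumed
 * from <netinet/kpi_ipfilter.h>): code that walks a filter list is expected
 * to bracket the walk with ipf_ref()/ipf_unref() so that ipf_remove() defers
 * any detach until no filter is running, e.g.:
 *
 *	struct ipfilter *filter;
 *
 *	ipf_ref();
 *	TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
 *		if (filter->ipf_filter.ipf_input != NULL)
 *			result = filter->ipf_filter.ipf_input(
 *			    filter->ipf_filter.cookie, &data, hlen, proto);
 *	}
 *	ipf_unref();
 */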

static errno_t
ipf_add(
	const struct ipf_filter* filter,
	ipfilter_t *filter_ref,
	struct ipfilter_list *head)
{
	struct ipfilter	*new_filter;
	if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL))
		return EINVAL;

	MALLOC(new_filter, struct ipfilter*, sizeof(*new_filter), M_IFADDR, M_WAITOK);
	if (new_filter == NULL)
		return ENOMEM;

	lck_mtx_lock(kipf_lock);
	new_filter->ipf_filter = *filter;
	new_filter->ipf_head = head;

	TAILQ_INSERT_HEAD(head, new_filter, ipf_link);

	lck_mtx_unlock(kipf_lock);

	*filter_ref = (ipfilter_t)new_filter;

	/* This will force TCP to re-evaluate its use of TSO */
	OSAddAtomic(1, &kipf_count);
	if (use_routegenid)
		routegenid_update();

	return 0;
}
144
145errno_t
146ipf_addv4(
147	const struct ipf_filter* filter,
148	ipfilter_t *filter_ref)
149{
150	return ipf_add(filter, filter_ref, &ipv4_filters);
151}
152
153errno_t
154ipf_addv6(
155	const struct ipf_filter* filter,
156	ipfilter_t *filter_ref)
157{
158	return ipf_add(filter, filter_ref, &ipv6_filters);
159}
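
/*
 * Illustrative sketch (names are hypothetical, structure layout and callback
 * signature assumed from <netinet/kpi_ipfilter.h>): a kernel extension would
 * typically register a filter by filling in a struct ipf_filter and calling
 * ipf_addv4()/ipf_addv6(), keeping the returned reference for later removal:
 *
 *	static ipfilter_t my_filter_ref;
 *
 *	static errno_t
 *	my_ipf_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
 *	{
 *		return 0;	// 0 lets the packet continue unmodified
 *	}
 *
 *	static struct ipf_filter my_filter = {
 *		.cookie		= NULL,
 *		.name		= "com.example.myfilter",
 *		.ipf_input	= my_ipf_input,
 *	};
 *
 *	errno_t err = ipf_addv4(&my_filter, &my_filter_ref);
 */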

errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter	*match = (struct ipfilter*)filter_ref;
	struct ipfilter_list *head;

	if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters))
		return EINVAL;

	head = match->ipf_head;

	lck_mtx_lock(kipf_lock);
	TAILQ_FOREACH(match, head, ipf_link) {
		if (match == (struct ipfilter*)filter_ref) {
			ipf_detach_func ipf_detach = match->ipf_filter.ipf_detach;
			void* cookie = match->ipf_filter.cookie;

			/*
			 * Cannot detach while there are filters running
			 */
			if (kipf_ref) {
				kipf_delayed_remove++;
				TAILQ_INSERT_TAIL(&tbr_filters, match, ipf_tbr);
				match->ipf_filter.ipf_input = 0;
				match->ipf_filter.ipf_output = 0;
				lck_mtx_unlock(kipf_lock);
			} else {
				TAILQ_REMOVE(head, match, ipf_link);
				lck_mtx_unlock(kipf_lock);
				if (ipf_detach)
					ipf_detach(cookie);
				FREE(match, M_IFADDR);

				/* This will force TCP to re-evaluate its use of TSO */
				OSAddAtomic(-1, &kipf_count);
				if (use_routegenid)
					routegenid_update();
			}
			return 0;
		}
	}
	lck_mtx_unlock(kipf_lock);

	return ENOENT;
}
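
/*
 * Illustrative sketch (names are hypothetical): because removal may be
 * deferred while filters are running, a caller should not release its own
 * state until its ipf_detach callback fires:
 *
 *	static void
 *	my_ipf_detach(void *cookie)
 *	{
 *		// The filter is now off ipv4_filters/ipv6_filters; it is
 *		// safe to release any per-filter state referenced by cookie.
 *		release_my_state(cookie);
 *	}
 *
 *	errno_t err = ipf_remove(my_filter_ref);
 */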

int log_for_en1 = 0;

errno_t
ipf_inject_input(
	mbuf_t data,
	ipfilter_t filter_ref)
{
	struct mbuf	*m = (struct mbuf*)data;
	struct m_tag *mtag = 0;
	struct ip *ip = mtod(m, struct ip *);
	u_int8_t	vers;
	int hlen;
	errno_t error = 0;
	protocol_family_t proto;

	vers = IP_VHL_V(ip->ip_vhl);

	switch (vers) {
		case 4:
			proto = PF_INET;
			break;
		case 6:
			proto = PF_INET6;
			break;
		default:
			error = ENOTSUP;
			goto done;
	}

	if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) {
		m->m_pkthdr.rcvif = lo_ifp;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		if (vers == 4) {
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_sum = 0;
			ip->ip_sum = in_cksum(m, hlen);
		}
	}
	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
		    sizeof (ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			error = ENOMEM;
			goto done;
		}
		*(ipfilter_t*)(mtag+1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	error = proto_inject(proto, data);

done:
	return error;
}
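
/*
 * Illustrative sketch (names are hypothetical; assumes the EJUSTRETURN
 * ownership convention documented for ipf_input_func): a filter that takes
 * possession of a packet in its input callback can later re-inject it here,
 * passing its own filter reference so the tag prepended above lets this
 * filter be skipped on the second pass:
 *
 *	static errno_t
 *	my_ipf_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
 *	{
 *		if (should_hold(*data)) {	// hypothetical predicate
 *			queue_for_later(*data);	// filter now owns the mbuf
 *			return EJUSTRETURN;
 *		}
 *		return 0;
 *	}
 *
 *	// later, from another context:
 *	errno_t err = ipf_inject_input(held_mbuf, my_filter_ref);
 */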

static errno_t
ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route ro;
	struct ip	*ip;
	struct mbuf	*m = (struct mbuf*)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip_moptions *imo = NULL;
	struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, 0 };

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof (struct ip)) {
		m = m_pullup(m, sizeof (struct ip));
		if (m == NULL)
			return (ENOMEM);
	}
	ip = (struct ip *)m_mtod(m);

	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return (ENOMEM);
		}
		*(ipfilter_t *)(mtag + 1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (imo = ip_allocmoptions(M_DONTWAIT)) != NULL) {
		imo->imo_multicast_ifp = options->ippo_mcast_ifnet;
		imo->imo_multicast_ttl = options->ippo_mcast_ttl;
		imo->imo_multicast_loop = options->ippo_mcast_loop;
	}

	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF)
			ipoa.ipoa_flags |= IPOAF_SELECT_SRCIF;
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ipoa.ipoa_flags |= IPOAF_BOUND_IF;
			ipoa.ipoa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR)
			ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR)
			ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
	}

	bzero(&ro, sizeof(struct route));

	/* Put ip_len and ip_off in host byte order; ip_output expects that */

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
	NTOHS(ip->ip_off);
#endif

	/* Send; enforce source interface selection via IP_OUTARGS flag */
	error = ip_output(m, NULL, &ro,
	    IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, imo, &ipoa);

	/* Release the route */
	if (ro.ro_rt)
		rtfree(ro.ro_rt);

	if (imo != NULL)
		IMO_REMREF(imo);

	return (error);
}

#if INET6
static errno_t
ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options)
{
	struct route_in6 ro;
	struct ip6_hdr	*ip6;
	struct mbuf	*m = (struct mbuf*)data;
	errno_t error = 0;
	struct m_tag *mtag = NULL;
	struct ip6_moptions *im6o = NULL;
	struct ip6_out_args ip6oa = { IFSCOPE_NONE, { 0 }, 0 };

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip6_hdr)) {
		m = m_pullup(m, sizeof(struct ip6_hdr));
		if (m == NULL)
			return (ENOMEM);
	}
	ip6 = (struct ip6_hdr*)m_mtod(m);

	if (filter_ref != 0) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m);
		if (mtag == NULL) {
			m_freem(m);
			return (ENOMEM);
		}
		*(ipfilter_t *)(mtag + 1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) &&
	    (im6o = ip6_allocmoptions(M_DONTWAIT)) != NULL) {
		im6o->im6o_multicast_ifp = options->ippo_mcast_ifnet;
		im6o->im6o_multicast_hlim = options->ippo_mcast_ttl;
		im6o->im6o_multicast_loop = options->ippo_mcast_loop;
	}

	if (options != NULL) {
		if (options->ippo_flags & IPPOF_SELECT_SRCIF)
			ip6oa.ip6oa_flags |= IP6OAF_SELECT_SRCIF;
		if (options->ippo_flags & IPPOF_BOUND_IF) {
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
			ip6oa.ip6oa_boundif = options->ippo_flags >>
			    IPPOF_SHIFT_IFSCOPE;
		}
		if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR)
			ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
		if (options->ippo_flags & IPPOF_BOUND_SRCADDR)
			ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR;
	}

	bzero(&ro, sizeof(struct route_in6));

	/*
	 * Send the mbuf along with the ifscope information. The ifscope
	 * information is validated while searching for a route in
	 * ip6_output.
	 */
	error = ip6_output(m, NULL, &ro, IPV6_OUTARGS, im6o, NULL, &ip6oa);

	/* Release the route */
	if (ro.ro_rt)
		rtfree(ro.ro_rt);

	if (im6o != NULL)
		IM6O_REMREF(im6o);

	return (error);
}
#endif /* INET6 */

errno_t
ipf_inject_output(
	mbuf_t data,
	ipfilter_t filter_ref,
	ipf_pktopts_t options)
{
	struct mbuf	*m = (struct mbuf*)data;
	u_int8_t	vers;
	errno_t		error = 0;

	/* Make one byte of the header contiguous in the mbuf */
	if (m->m_len < 1) {
		m = m_pullup(m, 1);
		if (m == NULL)
			goto done;
	}

	vers = (*(u_int8_t*)m_mtod(m)) >> 4;
	switch (vers)
	{
		case 4:
			error = ipf_injectv4_out(data, filter_ref, options);
			break;
#if INET6
		case 6:
			error = ipf_injectv6_out(data, filter_ref, options);
			break;
#endif
		default:
			m_freem(m);
			error = ENOTSUP;
			break;
	}

done:
	return error;
}
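
/*
 * Illustrative sketch (names are hypothetical): a filter that has generated
 * or rewritten an outbound packet can hand it back to the stack here.
 * Passing the filter's own reference keeps it from re-filtering its own
 * injection; NULL options skips the multicast and scoping overrides:
 *
 *	errno_t err = ipf_inject_output(generated_mbuf, my_filter_ref, NULL);
 *	if (err != 0)
 *		printf("ipf_inject_output failed: %d\n", err);
 */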

__private_extern__ ipfilter_t
ipf_get_inject_filter(struct mbuf *m)
{
	ipfilter_t filter_ref = 0;
	struct m_tag *mtag;

	mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT, NULL);
	if (mtag) {
		filter_ref = *(ipfilter_t *)(mtag+1);

		m_tag_delete(m, mtag);
	}
	return filter_ref;
}

__private_extern__ int
ipf_init(void)
{
	int error = 0;
	lck_grp_attr_t *grp_attributes = 0;
	lck_attr_t *lck_attributes = 0;
	lck_grp_t *lck_grp = 0;

	grp_attributes = lck_grp_attr_alloc_init();
	if (grp_attributes == 0) {
		printf("ipf_init: lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	lck_grp = lck_grp_alloc_init("IP Filter", grp_attributes);
	if (lck_grp == 0) {
		printf("ipf_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	lck_attributes = lck_attr_alloc_init();
	if (lck_attributes == 0) {
		printf("ipf_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	lck_mtx_init(kipf_lock, lck_grp, lck_attributes);

done:
	if (lck_grp) {
		lck_grp_free(lck_grp);
		lck_grp = 0;
	}
	if (grp_attributes) {
		lck_grp_attr_free(grp_attributes);
		grp_attributes = 0;
	}
	if (lck_attributes) {
		lck_attr_free(lck_attributes);
		lck_attributes = 0;
	}

	return error;
}

int
ipflow_fastforward(struct mbuf *m)
{
	return 0;
}