1/*
2 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*	$apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30/*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32/*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 *    - Redistributions of source code must retain the above copyright
42 *      notice, this list of conditions and the following disclaimer.
43 *    - Redistributions in binary form must reproduce the above
44 *      copyright notice, this list of conditions and the following
45 *      disclaimer in the documentation and/or other materials provided
46 *      with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67#include <machine/endian.h>
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/mbuf.h>
71#include <sys/filio.h>
72#include <sys/fcntl.h>
73#include <sys/socket.h>
74#include <sys/socketvar.h>
75#include <sys/kernel.h>
76#include <sys/time.h>
77#include <sys/proc_internal.h>
78#include <sys/malloc.h>
79#include <sys/kauth.h>
80#include <sys/conf.h>
81#include <sys/mcache.h>
82#include <sys/queue.h>
83
84#include <mach/vm_param.h>
85
86#include <net/dlil.h>
87#include <net/if.h>
88#include <net/if_types.h>
89#include <net/route.h>
90
91#include <netinet/in.h>
92#include <netinet/in_var.h>
93#include <netinet/in_systm.h>
94#include <netinet/ip.h>
95#include <netinet/ip_var.h>
96#include <netinet/ip_icmp.h>
97#include <netinet/if_ether.h>
98
99#if DUMMYNET
100#include <netinet/ip_dummynet.h>
101#else
102struct ip_fw_args;
103#endif /* DUMMYNET */
104
105#include <libkern/crypto/md5.h>
106
107#include <machine/machine_routines.h>
108
109#include <miscfs/devfs/devfs.h>
110
111#include <net/pfvar.h>
112
113#if NPFSYNC
114#include <net/if_pfsync.h>
115#endif /* NPFSYNC */
116
117#if PFLOG
118#include <net/if_pflog.h>
119#endif /* PFLOG */
120
121#if INET6
122#include <netinet/ip6.h>
123#include <netinet/in_pcb.h>
124#endif /* INET6 */
125
126#if PF_ALTQ
127#include <net/altq/altq.h>
128#include <net/altq/altq_cbq.h>
129#include <net/classq/classq_red.h>
130#include <net/classq/classq_rio.h>
131#include <net/classq/classq_blue.h>
132#include <net/classq/classq_sfb.h>
133#endif /* PF_ALTQ */
134
135#include <dev/random/randomdev.h>
136
137#if 0
138static void pfdetach(void);
139#endif
140static int pfopen(dev_t, int, int, struct proc *);
141static int pfclose(dev_t, int, int, struct proc *);
142static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
143static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
144    struct pfioc_table_64 *, struct proc *);
145static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
146    struct pfioc_tokens_64 *, struct proc *);
147static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
148static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
149    struct proc *);
150static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
151static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
152    struct pfioc_states_64 *, struct proc *);
153static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
154static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
155static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
156static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
157static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
158static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
159    struct pfioc_trans_64 *, struct proc *);
160static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
161    struct pfioc_src_nodes_64 *, struct proc *);
162static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
163    struct proc *);
164static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
165    struct pfioc_iface_64 *, struct proc *);
166static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
167    u_int8_t, u_int8_t, u_int8_t);
168static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
169static void pf_empty_pool(struct pf_palist *);
170#if PF_ALTQ
171static int pf_begin_altq(u_int32_t *);
172static int pf_rollback_altq(u_int32_t);
173static int pf_commit_altq(u_int32_t);
174static int pf_enable_altq(struct pf_altq *);
175static int pf_disable_altq(struct pf_altq *);
176static void pf_altq_copyin(struct pf_altq *, struct pf_altq *);
177static void pf_altq_copyout(struct pf_altq *, struct pf_altq *);
178#endif /* PF_ALTQ */
179static int pf_begin_rules(u_int32_t *, int, const char *);
180static int pf_rollback_rules(u_int32_t, int, char *);
181static int pf_setup_pfsync_matching(struct pf_ruleset *);
182static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
183static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
184static int pf_commit_rules(u_int32_t, int, char *);
185static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
186    int);
187static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
188static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
189    struct pf_state *);
190static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
191    struct pf_state *);
192static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
193static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
194static void pf_expire_states_and_src_nodes(struct pf_rule *);
195static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
196    int, struct pf_rule *);
197static void pf_addrwrap_setup(struct pf_addr_wrap *);
198static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
199    struct pf_ruleset *);
200static void pf_delete_rule_by_owner(char *, u_int32_t);
201static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
202static void pf_ruleset_cleanup(struct pf_ruleset *, int);
203static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
204    int, struct pf_rule **);
205
206#define	PF_CDEV_MAJOR	(-1)
207
208static struct cdevsw pf_cdevsw = {
209	/* open */	pfopen,
210	/* close */	pfclose,
211	/* read */	eno_rdwrt,
212	/* write */	eno_rdwrt,
213	/* ioctl */	pfioctl,
214	/* stop */	eno_stop,
215	/* reset */	eno_reset,
216	/* tty */	NULL,
217	/* select */	eno_select,
218	/* mmap */	eno_mmap,
219	/* strategy */	eno_strat,
220	/* getc */	eno_getc,
221	/* putc */	eno_putc,
222	/* type */	0
223};
224
225static void pf_attach_hooks(void);
226#if 0
227/* currently unused along with pfdetach() */
228static void pf_detach_hooks(void);
229#endif
230
231/*
232 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
233 * and used in pf_af_hook() for performance optimization, such that packets
234 * will enter pf_test() or pf_test6() only when PF is running.
235 */
236int pf_is_enabled = 0;
237
238#if PF_ALTQ
239u_int32_t altq_allowed = 0;
240#endif /* PF_ALTQ */
241
242u_int32_t pf_hash_seed;
243
244/*
245 * These are the pf enabled reference counting variables
246 */
247static u_int64_t pf_enabled_ref_count;
248static u_int32_t nr_tokens = 0;
249static u_int64_t pffwrules;
250static u_int32_t pfdevcnt;
251
252SLIST_HEAD(list_head, pfioc_kernel_token);
253static struct list_head token_list_head;
254
255struct pf_rule		 pf_default_rule;
256#if PF_ALTQ
257static int		 pf_altq_running;
258#endif /* PF_ALTQ */
259
260#define	TAGID_MAX	 50000
261#if !PF_ALTQ
262static TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags =
263    TAILQ_HEAD_INITIALIZER(pf_tags);
264#else /* PF_ALTQ */
265static TAILQ_HEAD(pf_tags, pf_tagname)
266    pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
267    pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
268#endif /* PF_ALTQ */
269
270#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
271#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
272#endif
273static u_int16_t	 tagname2tag(struct pf_tags *, char *);
274static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
275static void		 tag_unref(struct pf_tags *, u_int16_t);
276static int		 pf_rtlabel_add(struct pf_addr_wrap *);
277static void		 pf_rtlabel_remove(struct pf_addr_wrap *);
278static void		 pf_rtlabel_copyout(struct pf_addr_wrap *);
279
280#if INET
281static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
282    struct ip_fw_args *);
283#endif /* INET */
284#if INET6
285static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
286    struct ip_fw_args *);
287#endif /* INET6 */
288
289#define	DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
290
291/*
292 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
293 */
294#define	PFIOCX_STRUCT_DECL(s)						\
295struct {								\
296	union {								\
297		struct s##_32	_s##_32;				\
298		struct s##_64	_s##_64;				\
299	} _u;								\
300} *s##_un = NULL							\
301
302#define	PFIOCX_STRUCT_BEGIN(a, s, _action) {				\
303	VERIFY(s##_un == NULL);						\
304	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);	\
305	if (s##_un == NULL) {						\
306		_action							\
307	} else {							\
308		if (p64)						\
309			bcopy(a, &s##_un->_u._s##_64,			\
310			    sizeof (struct s##_64));			\
311		else							\
312			bcopy(a, &s##_un->_u._s##_32,			\
313			    sizeof (struct s##_32));			\
314	}								\
315}
316
317#define	PFIOCX_STRUCT_END(s, a) {					\
318	VERIFY(s##_un != NULL);						\
319	if (p64)							\
320		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));	\
321	else								\
322		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));	\
323	_FREE(s##_un, M_TEMP);						\
324	s##_un = NULL;							\
325}
326
327#define	PFIOCX_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
328#define	PFIOCX_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)
329
330/*
331 * Helper macros for regular ioctl structures.
332 */
333#define	PFIOC_STRUCT_BEGIN(a, v, _action) {				\
334	VERIFY((v) == NULL);						\
335	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);		\
336	if ((v) == NULL) {						\
337		_action							\
338	} else {							\
339		bcopy(a, v, sizeof (*(v)));				\
340	}								\
341}
342
343#define	PFIOC_STRUCT_END(v, a) {					\
344	VERIFY((v) != NULL);						\
345	bcopy(v, a, sizeof (*(v)));					\
346	_FREE(v, M_TEMP);						\
347	(v) = NULL;							\
348}
349
350#define	PFIOC_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
351#define	PFIOC_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)
352
353static lck_attr_t *pf_perim_lock_attr;
354static lck_grp_t *pf_perim_lock_grp;
355static lck_grp_attr_t *pf_perim_lock_grp_attr;
356
357static lck_attr_t *pf_lock_attr;
358static lck_grp_t *pf_lock_grp;
359static lck_grp_attr_t *pf_lock_grp_attr;
360
361struct thread *pf_purge_thread;
362
363extern void pfi_kifaddr_update(void *);
364
365/* pf enable ref-counting helper functions */
366static u_int64_t		generate_token(struct proc *);
367static int			remove_token(struct pfioc_remove_token *);
368static void			invalidate_all_tokens(void);
369
/*
 * Allocate a new pf-enable token for process p, record the caller's
 * pid, name and a timestamp, and link the entry onto token_list_head.
 * Returns the (address-permuted) token value, or 0 on allocation
 * failure.  Caller must hold pf_lock (asserted below).
 */
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	/*
	 * NOTE(review): M_WAITOK allocation may block while the caller
	 * holds pf_lock (per the assert below) — confirm acceptable.
	 */
	new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK|M_ZERO);

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		printf("%s: unable to allocate pf token structure!", __func__);
		return (0);
	}

	/* permute the kernel address so it is safe to expose to userland */
	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof (new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return (token_value);
}
400
401static int
402remove_token(struct pfioc_remove_token *tok)
403{
404	struct pfioc_kernel_token *entry, *tmp;
405
406	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
407
408	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
409		if (tok->token_value == entry->token.token_value) {
410			SLIST_REMOVE(&token_list_head, entry,
411			    pfioc_kernel_token, next);
412			_FREE(entry, M_TEMP);
413			nr_tokens--;
414			return (0);    /* success */
415		}
416	}
417
418	printf("pf : remove failure\n");
419	return (ESRCH);    /* failure */
420}
421
422static void
423invalidate_all_tokens(void)
424{
425	struct pfioc_kernel_token *entry, *tmp;
426
427	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
428
429	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
430		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
431		_FREE(entry, M_TEMP);
432	}
433
434	nr_tokens = 0;
435}
436
/*
 * One-time initialization of the pf subsystem: locks, fixed-size
 * allocation pools, sub-modules (pfr/pfi/osfp/normalize), the default
 * rule and its timeout table, the purge thread, the /dev/pf and
 * /dev/pfm device nodes, and the packet-filter hooks.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* perimeter rw-lock and the main pf mutex */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* fixed-size pools for the core pf objects */
	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#if PF_ALTQ
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
#endif /* PF_ALTQ */
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* cap the state pool at the configured state limit */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on small-memory (<=256MB) systems */
	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	/* global trees, the main ruleset and the state list */
	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);
#if PF_ALTQ
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	/* ALTQ is opt-in via the "altq" boot argument */
	PE_parse_boot_argn("altq", &altq_allowed, sizeof (altq_allowed));

	/* ALTQ request codes must line up with their classq equivalents */
	_CASSERT(ALTRQ_PURGE == CLASSQRQ_PURGE);
	_CASSERT(ALTRQ_PURGE_SC == CLASSQRQ_PURGE_SC);
	_CASSERT(ALTRQ_EVENT == CLASSQRQ_EVENT);

	_CASSERT(ALTDQ_REMOVE == CLASSQDQ_REMOVE);
	_CASSERT(ALTDQ_POLL == CLASSQDQ_POLL);
#endif /* PF_ALTQ */

	/* service-class values must encode their own indices */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	/*
	 * Failure below leaves pf without its purge thread, device nodes
	 * and hooks; pf_attach_hooks() is only reached on full success.
	 */
	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
}
574
575#if 0
/*
 * Tear down the pf subsystem: detach hooks, flush rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and
 * sub-modules.  Currently compiled out (see the surrounding #if 0).
 *
 * NOTE(review): pf_state_key_pl and pf_app_state_pl, initialized in
 * pfinit(), are not destroyed here — confirm whether that is
 * intentional before re-enabling this function.
 */
static void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	pt;
	u_int32_t		ticket;
	int			i;
	char			r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
				pf_commit_rules(ticket, i, &r);
#if PF_ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* PF_ALTQ */

	/* clear states: mark them all for immediate purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach them from states, then expire them */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors by committing an empty transaction in each */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
#if PF_ALTQ
	pool_destroy(&pf_altq_pl);
#endif /* PF_ALTQ */
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
654#endif
655
656static int
657pfopen(dev_t dev, int flags, int fmt, struct proc *p)
658{
659#pragma unused(flags, fmt, p)
660	if (minor(dev) >= PFDEV_MAX)
661		return (ENXIO);
662
663	if (minor(dev) == PFDEV_PFM) {
664		lck_mtx_lock(pf_lock);
665		if (pfdevcnt != 0) {
666			lck_mtx_unlock(pf_lock);
667			return (EBUSY);
668		}
669		pfdevcnt++;
670		lck_mtx_unlock(pf_lock);
671	}
672	return (0);
673}
674
675static int
676pfclose(dev_t dev, int flags, int fmt, struct proc *p)
677{
678#pragma unused(flags, fmt, p)
679	if (minor(dev) >= PFDEV_MAX)
680		return (ENXIO);
681
682	if (minor(dev) == PFDEV_PFM) {
683		lck_mtx_lock(pf_lock);
684		VERIFY(pfdevcnt > 0);
685		pfdevcnt--;
686		lck_mtx_unlock(pf_lock);
687	}
688	return (0);
689}
690
691static struct pf_pool *
692pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
693    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
694    u_int8_t check_ticket)
695{
696	struct pf_ruleset	*ruleset;
697	struct pf_rule		*rule;
698	int			 rs_num;
699
700	ruleset = pf_find_ruleset(anchor);
701	if (ruleset == NULL)
702		return (NULL);
703	rs_num = pf_get_ruleset_number(rule_action);
704	if (rs_num >= PF_RULESET_MAX)
705		return (NULL);
706	if (active) {
707		if (check_ticket && ticket !=
708		    ruleset->rules[rs_num].active.ticket)
709			return (NULL);
710		if (r_last)
711			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
712			    pf_rulequeue);
713		else
714			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
715	} else {
716		if (check_ticket && ticket !=
717		    ruleset->rules[rs_num].inactive.ticket)
718			return (NULL);
719		if (r_last)
720			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
721			    pf_rulequeue);
722		else
723			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
724	}
725	if (!r_last) {
726		while ((rule != NULL) && (rule->nr != rule_number))
727			rule = TAILQ_NEXT(rule, entries);
728	}
729	if (rule == NULL)
730		return (NULL);
731
732	return (&rule->rpool);
733}
734
735static void
736pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
737{
738	struct pf_pooladdr	*mv_pool_pa;
739
740	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
741		TAILQ_REMOVE(poola, mv_pool_pa, entries);
742		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
743	}
744}
745
746static void
747pf_empty_pool(struct pf_palist *poola)
748{
749	struct pf_pooladdr	*empty_pool_pa;
750
751	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
752		pfi_dynaddr_remove(&empty_pool_pa->addr);
753		pf_tbladdr_remove(&empty_pool_pa->addr);
754		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
755		TAILQ_REMOVE(poola, empty_pool_pa, entries);
756		pool_put(&pf_pooladdr_pl, empty_pool_pa);
757	}
758}
759
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and drop all of the
 * rule's references.  The rule itself is freed only once no states or
 * source nodes still point at it and it is no longer on any queue;
 * otherwise destruction is deferred to a later call with
 * rulequeue == NULL.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer destruction while states/src nodes reference the rule,
	 * or while it is still linked on some queue */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#if PF_ALTQ
	if (altq_allowed) {
		if (rule->pqid != rule->qid)
			pf_qid_unref(rule->pqid);
		pf_qid_unref(rule->qid);
	}
#endif /* PF_ALTQ */
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* not unlinked above, so the table refs are still held */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
807
/*
 * Return the tag id for `tagname', taking an extra reference if the
 * name already exists, or allocating a new entry otherwise.  The list
 * is kept sorted by tag id so the lowest free id can be reused.
 * Returns 0 when the id space (1..TAGID_MAX) is exhausted or the
 * allocation fails.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* advance past consecutively-numbered entries; the loop
		 * stops at the first gap (p then marks the insert point) */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
850
851static void
852tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
853{
854	struct pf_tagname	*tag;
855
856	TAILQ_FOREACH(tag, head, entries)
857		if (tag->tag == tagid) {
858			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
859			return;
860		}
861}
862
863static void
864tag_unref(struct pf_tags *head, u_int16_t tag)
865{
866	struct pf_tagname	*p, *next;
867
868	if (tag == 0)
869		return;
870
871	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
872		next = TAILQ_NEXT(p, entries);
873		if (tag == p->tag) {
874			if (--p->ref == 0) {
875				TAILQ_REMOVE(head, p, entries);
876				_FREE(p, M_TEMP);
877			}
878			break;
879		}
880	}
881}
882
883u_int16_t
884pf_tagname2tag(char *tagname)
885{
886	return (tagname2tag(&pf_tags, tagname));
887}
888
889void
890pf_tag2tagname(u_int16_t tagid, char *p)
891{
892	tag2tagname(&pf_tags, tagid, p);
893}
894
895void
896pf_tag_ref(u_int16_t tag)
897{
898	struct pf_tagname *t;
899
900	TAILQ_FOREACH(t, &pf_tags, entries)
901		if (t->tag == tag)
902			break;
903	if (t != NULL)
904		t->ref++;
905}
906
907void
908pf_tag_unref(u_int16_t tag)
909{
910	tag_unref(&pf_tags, tag);
911}
912
/*
 * Route-label support is not implemented here; this is a no-op stub
 * (always succeeds) kept so the shared pf code can call it
 * unconditionally.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}
919
/* No-op stub: route-label support is not implemented here. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
925
/* No-op stub: route-label support is not implemented here. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
931
932#if PF_ALTQ
933u_int32_t
934pf_qname2qid(char *qname)
935{
936	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
937
938	return ((u_int32_t)tagname2tag(&pf_qids, qname));
939}
940
941void
942pf_qid2qname(u_int32_t qid, char *p)
943{
944	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
945
946	tag2tagname(&pf_qids, (u_int16_t)qid, p);
947}
948
949void
950pf_qid_unref(u_int32_t qid)
951{
952	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
953
954	tag_unref(&pf_qids, (u_int16_t)qid);
955}
956
/*
 * Open an ALTQ transaction: purge whatever is left on the inactive
 * altq list, then hand out a fresh inactive ticket.  Returns the first
 * altq_remove() error encountered (aborting before the ticket is
 * issued), otherwise 0.  Caller must hold pf_lock.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
981
/*
 * Abort the open ALTQ transaction identified by `ticket': purge the
 * inactive altq list and mark the transaction closed.  A stale or
 * unknown ticket is silently ignored (returns 0).  Caller must hold
 * pf_lock.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
1005
/*
 * Commit the open ALTQ transaction identified by `ticket': swap the
 * active and inactive altq lists, attach (and, when pf_altq_running,
 * enable) the new disciplines, then tear down the now-inactive old
 * list.  Returns EBUSY for a stale ticket, otherwise the first error
 * encountered.  Caller must hold pf_lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == '\0') {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				/*
				 * NOTE(review): returning here leaves
				 * altqs_inactive_open set and the old
				 * list unpurged — confirm intentional.
				 */
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}

	altqs_inactive_open = 0;
	return (error);
}
1058
1059static int
1060pf_enable_altq(struct pf_altq *altq)
1061{
1062	struct ifnet		*ifp;
1063	struct ifclassq		*ifq;
1064	int			 error = 0;
1065
1066	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1067
1068	if ((ifp = ifunit(altq->ifname)) == NULL)
1069		return (EINVAL);
1070
1071	ifq = &ifp->if_snd;
1072	IFCQ_LOCK(ifq);
1073	if (IFCQ_ALTQ(ifq)->altq_type != ALTQT_NONE)
1074		error = altq_enable(IFCQ_ALTQ(ifq));
1075
1076	/* set or clear tokenbucket regulator */
1077	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
1078		struct tb_profile tb = { 0, 0, 0 };
1079
1080		if (altq->aflags & PF_ALTQF_TBR) {
1081			if (altq->bwtype != PF_ALTQ_BW_ABSOLUTE &&
1082			    altq->bwtype != PF_ALTQ_BW_PERCENT) {
1083				error = EINVAL;
1084			} else {
1085				if (altq->bwtype == PF_ALTQ_BW_ABSOLUTE)
1086					tb.rate = altq->ifbandwidth;
1087				else
1088					tb.percent = altq->ifbandwidth;
1089				tb.depth = altq->tbrsize;
1090				error = ifclassq_tbr_set(ifq, &tb, TRUE);
1091			}
1092		} else if (IFCQ_TBR_IS_ENABLED(ifq)) {
1093			error = ifclassq_tbr_set(ifq, &tb, TRUE);
1094		}
1095	}
1096	IFCQ_UNLOCK(ifq);
1097
1098	return (error);
1099}
1100
/*
 * Disable the altq discipline referenced by `altq' on its interface
 * and clear any tokenbucket regulator.  Returns 0 on success or if the
 * discipline has already been superseded, EINVAL if the interface is
 * gone, else the errno from altq_disable() / ifclassq_tbr_set().
 */
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct ifclassq		*ifq;
	int			 error;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	ifq = &ifp->if_snd;
	IFCQ_LOCK(ifq);
	if (altq->altq_disc != IFCQ_ALTQ(ifq)->altq_disc) {
		IFCQ_UNLOCK(ifq);
		return (0);
	}

	error = altq_disable(IFCQ_ALTQ(ifq));

	if (error == 0 && IFCQ_TBR_IS_ENABLED(ifq)) {
		/* clear tokenbucket regulator */
		struct tb_profile  tb = { 0, 0, 0 };
		error = ifclassq_tbr_set(ifq, &tb, TRUE);
	}
	IFCQ_UNLOCK(ifq);

	return (error);
}
1135
1136static void
1137pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
1138{
1139	bcopy(src, dst, sizeof (struct pf_altq));
1140
1141	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1142	dst->qname[sizeof (dst->qname) - 1] = '\0';
1143	dst->parent[sizeof (dst->parent) - 1] = '\0';
1144	dst->altq_disc = NULL;
1145	dst->entries.tqe_next = NULL;
1146	dst->entries.tqe_prev = NULL;
1147}
1148
1149static void
1150pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
1151{
1152	struct pf_altq pa;
1153
1154	bcopy(src, &pa, sizeof (struct pf_altq));
1155	pa.altq_disc = NULL;
1156	pa.entries.tqe_next = NULL;
1157	pa.entries.tqe_prev = NULL;
1158	bcopy(&pa, dst, sizeof (struct pf_altq));
1159}
1160#endif /* PF_ALTQ */
1161
1162static int
1163pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1164{
1165	struct pf_ruleset	*rs;
1166	struct pf_rule		*rule;
1167
1168	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1169		return (EINVAL);
1170	rs = pf_find_or_create_ruleset(anchor);
1171	if (rs == NULL)
1172		return (EINVAL);
1173	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1174		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1175		rs->rules[rs_num].inactive.rcount--;
1176	}
1177	*ticket = ++rs->rules[rs_num].inactive.ticket;
1178	rs->rules[rs_num].inactive.open = 1;
1179	return (0);
1180}
1181
1182static int
1183pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1184{
1185	struct pf_ruleset	*rs;
1186	struct pf_rule		*rule;
1187
1188	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1189		return (EINVAL);
1190	rs = pf_find_ruleset(anchor);
1191	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1192	    rs->rules[rs_num].inactive.ticket != ticket)
1193		return (0);
1194	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1195		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1196		rs->rules[rs_num].inactive.rcount--;
1197	}
1198	rs->rules[rs_num].inactive.open = 0;
1199	return (0);
1200}
1201
/*
 * Helpers for folding rule fields into the MD5 context when computing
 * the pfsync ruleset checksum.  All of them expect an MD5_CTX pointer
 * named `ctx' to be in scope at the use site.  The HTONL/HTONS
 * variants hash the value in network byte order, via the
 * caller-supplied scratch variable `stor', so the resulting checksum
 * is host-byte-order independent.
 */
#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string field (terminator not included). */
#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
1217
/*
 * Fold the match-relevant fields of a rule address into the MD5
 * context for the pfsync ruleset checksum.  Which address fields are
 * hashed depends on the address type; port range data is hashed only
 * for TCP and UDP rules.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1254
/*
 * Fold every match-relevant field of a rule into the MD5 context used
 * for the pfsync ruleset checksum.  `x' and `y' are scratch storage
 * for the byte-order-normalizing PF_MD5_UPD_HTONS/HTONL macros.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1293
/*
 * Commit a rule transaction: swap the staged inactive ruleset of type
 * `rs_num' in anchor `anchor' into place as the active one, recompute
 * skip steps, then free the old rules.  For the main ruleset the
 * pfsync matching checksum is recomputed first.  Returns 0, EINVAL on
 * a bad ruleset number, EBUSY if the ticket does not match an open
 * transaction, or the error from pf_setup_pfsync_matching().
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array, *r;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* Outgoing PFM-tagged rules leave the global pffwrules count. */
	if(old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	/* `inactive' now refers to the old side after the swap above. */
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
1360
1361static void
1362pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1363    int minordev)
1364{
1365	bcopy(src, dst, sizeof (struct pf_rule));
1366
1367	dst->label[sizeof (dst->label) - 1] = '\0';
1368	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1369	dst->qname[sizeof (dst->qname) - 1] = '\0';
1370	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
1371	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
1372	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
1373	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';
1374
1375	dst->cuid = kauth_cred_getuid(p->p_ucred);
1376	dst->cpid = p->p_pid;
1377
1378	dst->anchor = NULL;
1379	dst->kif = NULL;
1380	dst->overload_tbl = NULL;
1381
1382	TAILQ_INIT(&dst->rpool.list);
1383	dst->rpool.cur = NULL;
1384
1385	/* initialize refcounting */
1386	dst->states = 0;
1387	dst->src_nodes = 0;
1388
1389	dst->entries.tqe_prev = NULL;
1390	dst->entries.tqe_next = NULL;
1391	if ((uint8_t)minordev == PFDEV_PFM)
1392		dst->rule_flag |= PFRULE_PFM;
1393}
1394
1395static void
1396pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1397{
1398	bcopy(src, dst, sizeof (struct pf_rule));
1399
1400	dst->anchor = NULL;
1401	dst->kif = NULL;
1402	dst->overload_tbl = NULL;
1403
1404	TAILQ_INIT(&dst->rpool.list);
1405	dst->rpool.cur = NULL;
1406
1407	dst->entries.tqe_prev = NULL;
1408	dst->entries.tqe_next = NULL;
1409}
1410
/*
 * Flatten a pf_state plus its pf_state_key into the export format
 * (struct pfsync_state) handed to user space and pfsync peers.
 * Absolute creation/expiry times are converted to relative seconds.
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext.addr = sk->ext.addr;
	sp->ext.xport = sk->ext.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* rule numbers; (unsigned)-1 marks "no rule" */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;	/* age in seconds */
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	/* convert absolute expiry time into seconds remaining */
	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}
1466
/*
 * Rebuild kernel state from an imported pfsync_state: populate the
 * state key `sk' and the state `s'.  Rule pointers are reset to the
 * default rule (the originating rules are not meaningful here) and
 * counters start from zero.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext.addr = sp->ext.addr;
	sk->ext.xport = sp->ext.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;
	/* flow hash is local; recompute rather than trust the peer */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/* back-date expiry so `sp->expire' seconds of lifetime remain */
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1503
1504static void
1505pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1506{
1507	bcopy(src, dst, sizeof (struct pf_pooladdr));
1508
1509	dst->entries.tqe_prev = NULL;
1510	dst->entries.tqe_next = NULL;
1511	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1512	dst->kif = NULL;
1513}
1514
1515static void
1516pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1517{
1518	bcopy(src, dst, sizeof (struct pf_pooladdr));
1519
1520	dst->entries.tqe_prev = NULL;
1521	dst->entries.tqe_next = NULL;
1522	dst->kif = NULL;
1523}
1524
/*
 * Compute the MD5 checksum over the staged (inactive) rulesets and
 * store it in pf_status.pf_chksum; pfsync uses it to check that peers
 * run matching rulesets.  As a side effect, (re)build each ruleset's
 * inactive.ptr_array, an array of rule pointers indexed by rule
 * number.  Returns 0 or ENOMEM.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Drop any stale pointer array before rebuilding it. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		/* Hash every staged rule and index it by rule number. */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
1564
/*
 * Mark pf enabled and running and wake the purge thread.  Must be
 * called with the pf lock held and pf currently disabled.
 */
static void
pf_start(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	/* Seed the state-id base once: current seconds in the high word. */
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1582
/*
 * Mark pf disabled and stopped and wake the purge thread so it can
 * notice the state change.  Must be called with the pf lock held and
 * pf currently enabled.
 */
static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1596
1597static int
1598pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1599{
1600#pragma unused(dev)
1601	int p64 = proc_is64bit(p);
1602	int error = 0;
1603	int minordev = minor(dev);
1604
1605	if (kauth_cred_issuser(kauth_cred_get()) == 0)
1606		return (EPERM);
1607
1608	/* XXX keep in sync with switch() below */
1609	if (securelevel > 1)
1610		switch (cmd) {
1611		case DIOCGETRULES:
1612		case DIOCGETRULE:
1613		case DIOCGETADDRS:
1614		case DIOCGETADDR:
1615		case DIOCGETSTATE:
1616		case DIOCSETSTATUSIF:
1617		case DIOCGETSTATUS:
1618		case DIOCCLRSTATUS:
1619		case DIOCNATLOOK:
1620		case DIOCSETDEBUG:
1621		case DIOCGETSTATES:
1622		case DIOCINSERTRULE:
1623		case DIOCDELETERULE:
1624		case DIOCGETTIMEOUT:
1625		case DIOCCLRRULECTRS:
1626		case DIOCGETLIMIT:
1627		case DIOCGETALTQS:
1628		case DIOCGETALTQ:
1629		case DIOCGETQSTATS:
1630		case DIOCGETRULESETS:
1631		case DIOCGETRULESET:
1632		case DIOCRGETTABLES:
1633		case DIOCRGETTSTATS:
1634		case DIOCRCLRTSTATS:
1635		case DIOCRCLRADDRS:
1636		case DIOCRADDADDRS:
1637		case DIOCRDELADDRS:
1638		case DIOCRSETADDRS:
1639		case DIOCRGETADDRS:
1640		case DIOCRGETASTATS:
1641		case DIOCRCLRASTATS:
1642		case DIOCRTSTADDRS:
1643		case DIOCOSFPGET:
1644		case DIOCGETSRCNODES:
1645		case DIOCCLRSRCNODES:
1646		case DIOCIGETIFACES:
1647		case DIOCGIFSPEED:
1648		case DIOCSETIFFLAG:
1649		case DIOCCLRIFFLAG:
1650			break;
1651		case DIOCRCLRTABLES:
1652		case DIOCRADDTABLES:
1653		case DIOCRDELTABLES:
1654		case DIOCRSETTFLAGS: {
1655			int pfrio_flags;
1656
1657			bcopy(&((struct pfioc_table *)(void *)addr)->
1658			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1659
1660			if (pfrio_flags & PFR_FLAG_DUMMY)
1661				break; /* dummy operation ok */
1662			return (EPERM);
1663		}
1664		default:
1665			return (EPERM);
1666		}
1667
1668	if (!(flags & FWRITE))
1669		switch (cmd) {
1670		case DIOCSTART:
1671		case DIOCSTARTREF:
1672		case DIOCSTOP:
1673		case DIOCSTOPREF:
1674		case DIOCGETSTARTERS:
1675		case DIOCGETRULES:
1676		case DIOCGETADDRS:
1677		case DIOCGETADDR:
1678		case DIOCGETSTATE:
1679		case DIOCGETSTATUS:
1680		case DIOCGETSTATES:
1681		case DIOCINSERTRULE:
1682		case DIOCDELETERULE:
1683		case DIOCGETTIMEOUT:
1684		case DIOCGETLIMIT:
1685		case DIOCGETALTQS:
1686		case DIOCGETALTQ:
1687		case DIOCGETQSTATS:
1688		case DIOCGETRULESETS:
1689		case DIOCGETRULESET:
1690		case DIOCNATLOOK:
1691		case DIOCRGETTABLES:
1692		case DIOCRGETTSTATS:
1693		case DIOCRGETADDRS:
1694		case DIOCRGETASTATS:
1695		case DIOCRTSTADDRS:
1696		case DIOCOSFPGET:
1697		case DIOCGETSRCNODES:
1698		case DIOCIGETIFACES:
1699		case DIOCGIFSPEED:
1700			break;
1701		case DIOCRCLRTABLES:
1702		case DIOCRADDTABLES:
1703		case DIOCRDELTABLES:
1704		case DIOCRCLRTSTATS:
1705		case DIOCRCLRADDRS:
1706		case DIOCRADDADDRS:
1707		case DIOCRDELADDRS:
1708		case DIOCRSETADDRS:
1709		case DIOCRSETTFLAGS: {
1710			int pfrio_flags;
1711
1712			bcopy(&((struct pfioc_table *)(void *)addr)->
1713			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1714
1715			if (pfrio_flags & PFR_FLAG_DUMMY) {
1716				flags |= FWRITE; /* need write lock for dummy */
1717				break; /* dummy operation ok */
1718			}
1719			return (EACCES);
1720		}
1721		case DIOCGETRULE: {
1722			u_int32_t action;
1723
1724			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1725			    &action, sizeof (action));
1726
1727			if (action == PF_GET_CLR_CNTR)
1728				return (EACCES);
1729			break;
1730		}
1731		default:
1732			return (EACCES);
1733		}
1734
1735#if PF_ALTQ
1736	switch (cmd) {
1737	case DIOCSTARTALTQ:
1738	case DIOCSTOPALTQ:
1739	case DIOCADDALTQ:
1740	case DIOCGETALTQS:
1741	case DIOCGETALTQ:
1742	case DIOCCHANGEALTQ:
1743	case DIOCGETQSTATS:
1744		/* fail if ALTQ is disabled */
1745		if (!altq_allowed)
1746			return (ENODEV);
1747		break;
1748	}
1749#endif /* PF_ALTQ */
1750
1751	if (flags & FWRITE)
1752		lck_rw_lock_exclusive(pf_perim_lock);
1753	else
1754		lck_rw_lock_shared(pf_perim_lock);
1755
1756	lck_mtx_lock(pf_lock);
1757
1758	switch (cmd) {
1759
1760	case DIOCSTART:
1761		if (pf_status.running) {
1762			/*
1763			 * Increment the reference for a simple -e enable, so
1764			 * that even if other processes drop their references,
1765			 * pf will still be available to processes that turned
1766			 * it on without taking a reference
1767			 */
1768			if (nr_tokens == pf_enabled_ref_count) {
1769				pf_enabled_ref_count++;
1770				VERIFY(pf_enabled_ref_count != 0);
1771			}
1772			error = EEXIST;
1773		} else if (pf_purge_thread == NULL) {
1774			error = ENOMEM;
1775		} else {
1776			pf_start();
1777			pf_enabled_ref_count++;
1778			VERIFY(pf_enabled_ref_count != 0);
1779		}
1780		break;
1781
1782	case DIOCSTARTREF:		/* u_int64_t */
1783		if (pf_purge_thread == NULL) {
1784			error = ENOMEM;
1785		} else {
1786			u_int64_t token;
1787
1788			/* small enough to be on stack */
1789			if ((token = generate_token(p)) != 0) {
1790				if (pf_is_enabled == 0) {
1791					pf_start();
1792				}
1793				pf_enabled_ref_count++;
1794				VERIFY(pf_enabled_ref_count != 0);
1795			} else {
1796				error = ENOMEM;
1797				DPFPRINTF(PF_DEBUG_URGENT,
1798				    ("pf: unable to generate token\n"));
1799			}
1800			bcopy(&token, addr, sizeof (token));
1801		}
1802		break;
1803
1804	case DIOCSTOP:
1805		if (!pf_status.running) {
1806			error = ENOENT;
1807		} else {
1808			pf_stop();
1809			pf_enabled_ref_count = 0;
1810			invalidate_all_tokens();
1811		}
1812		break;
1813
1814	case DIOCSTOPREF:		/* struct pfioc_remove_token */
1815		if (!pf_status.running) {
1816			error = ENOENT;
1817		} else {
1818			struct pfioc_remove_token pfrt;
1819
1820			/* small enough to be on stack */
1821			bcopy(addr, &pfrt, sizeof (pfrt));
1822			if ((error = remove_token(&pfrt)) == 0) {
1823				VERIFY(pf_enabled_ref_count != 0);
1824				pf_enabled_ref_count--;
1825				/* return currently held references */
1826				pfrt.refcount = pf_enabled_ref_count;
1827				DPFPRINTF(PF_DEBUG_MISC,
1828				    ("pf: enabled refcount decremented\n"));
1829			} else {
1830				error = EINVAL;
1831				DPFPRINTF(PF_DEBUG_URGENT,
1832				    ("pf: token mismatch\n"));
1833			}
1834			bcopy(&pfrt, addr, sizeof (pfrt));
1835
1836			if (error == 0 && pf_enabled_ref_count == 0)
1837				pf_stop();
1838		}
1839		break;
1840
1841	case DIOCGETSTARTERS: {		/* struct pfioc_tokens */
1842		PFIOCX_STRUCT_DECL(pfioc_tokens);
1843
1844		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
1845		error = pfioctl_ioc_tokens(cmd,
1846		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1847		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1848		PFIOCX_STRUCT_END(pfioc_tokens, addr);
1849		break;
1850	}
1851
1852	case DIOCADDRULE:		/* struct pfioc_rule */
1853	case DIOCGETRULES:		/* struct pfioc_rule */
1854	case DIOCGETRULE:		/* struct pfioc_rule */
1855	case DIOCCHANGERULE:		/* struct pfioc_rule */
1856	case DIOCINSERTRULE:		/* struct pfioc_rule */
1857	case DIOCDELETERULE: {		/* struct pfioc_rule */
1858		struct pfioc_rule *pr = NULL;
1859
1860		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
1861		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1862		PFIOC_STRUCT_END(pr, addr);
1863		break;
1864	}
1865
1866	case DIOCCLRSTATES:		/* struct pfioc_state_kill */
1867	case DIOCKILLSTATES: {		/* struct pfioc_state_kill */
1868		struct pfioc_state_kill *psk = NULL;
1869
1870		PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
1871		error = pfioctl_ioc_state_kill(cmd, psk, p);
1872		PFIOC_STRUCT_END(psk, addr);
1873		break;
1874	}
1875
1876	case DIOCADDSTATE:		/* struct pfioc_state */
1877	case DIOCGETSTATE: {		/* struct pfioc_state */
1878		struct pfioc_state *ps = NULL;
1879
1880		PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
1881		error = pfioctl_ioc_state(cmd, ps, p);
1882		PFIOC_STRUCT_END(ps, addr);
1883		break;
1884	}
1885
1886	case DIOCGETSTATES: {		/* struct pfioc_states */
1887		PFIOCX_STRUCT_DECL(pfioc_states);
1888
1889		PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
1890		error = pfioctl_ioc_states(cmd,
1891		    PFIOCX_STRUCT_ADDR32(pfioc_states),
1892		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1893		PFIOCX_STRUCT_END(pfioc_states, addr);
1894		break;
1895	}
1896
1897	case DIOCGETSTATUS: {		/* struct pf_status */
1898		struct pf_status *s = NULL;
1899
1900		PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
1901		pfi_update_status(s->ifname, s);
1902		PFIOC_STRUCT_END(s, addr);
1903		break;
1904	}
1905
1906	case DIOCSETSTATUSIF: {		/* struct pfioc_if */
1907		struct pfioc_if	*pi = (struct pfioc_if *)(void *)addr;
1908
1909		/* OK for unaligned accesses */
1910		if (pi->ifname[0] == 0) {
1911			bzero(pf_status.ifname, IFNAMSIZ);
1912			break;
1913		}
1914		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1915		break;
1916	}
1917
1918	case DIOCCLRSTATUS: {
1919		bzero(pf_status.counters, sizeof (pf_status.counters));
1920		bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
1921		bzero(pf_status.scounters, sizeof (pf_status.scounters));
1922		pf_status.since = pf_calendar_time_second();
1923		if (*pf_status.ifname)
1924			pfi_update_status(pf_status.ifname, NULL);
1925		break;
1926	}
1927
1928	case DIOCNATLOOK: {		/* struct pfioc_natlook */
1929		struct pfioc_natlook *pnl = NULL;
1930
1931		PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
1932		error = pfioctl_ioc_natlook(cmd, pnl, p);
1933		PFIOC_STRUCT_END(pnl, addr);
1934		break;
1935	}
1936
1937	case DIOCSETTIMEOUT:		/* struct pfioc_tm */
1938	case DIOCGETTIMEOUT: {		/* struct pfioc_tm */
1939		struct pfioc_tm	pt;
1940
1941		/* small enough to be on stack */
1942		bcopy(addr, &pt, sizeof (pt));
1943		error = pfioctl_ioc_tm(cmd, &pt, p);
1944		bcopy(&pt, addr, sizeof (pt));
1945		break;
1946	}
1947
1948	case DIOCGETLIMIT:		/* struct pfioc_limit */
1949	case DIOCSETLIMIT: {		/* struct pfioc_limit */
1950		struct pfioc_limit pl;
1951
1952		/* small enough to be on stack */
1953		bcopy(addr, &pl, sizeof (pl));
1954		error = pfioctl_ioc_limit(cmd, &pl, p);
1955		bcopy(&pl, addr, sizeof (pl));
1956		break;
1957	}
1958
1959	case DIOCSETDEBUG: {		/* u_int32_t */
1960		bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
1961		break;
1962	}
1963
1964	case DIOCCLRRULECTRS: {
1965		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1966		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1967		struct pf_rule		*rule;
1968
1969		TAILQ_FOREACH(rule,
1970		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1971			rule->evaluations = 0;
1972			rule->packets[0] = rule->packets[1] = 0;
1973			rule->bytes[0] = rule->bytes[1] = 0;
1974		}
1975		break;
1976	}
1977
1978	case DIOCGIFSPEED: {
1979		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1980		struct pf_ifspeed ps;
1981		struct ifnet *ifp;
1982		u_int64_t baudrate;
1983
1984		if (psp->ifname[0] != '\0') {
1985			/* Can we completely trust user-land? */
1986			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1987			ps.ifname[IFNAMSIZ - 1] = '\0';
1988			ifp = ifunit(ps.ifname);
1989			if (ifp != NULL) {
1990				baudrate = ifp->if_output_bw.max_bw;
1991				bcopy(&baudrate, &psp->baudrate,
1992				    sizeof (baudrate));
1993			} else {
1994				error = EINVAL;
1995			}
1996		} else {
1997			error = EINVAL;
1998		}
1999		break;
2000	}
2001
2002#if PF_ALTQ
2003	case DIOCSTARTALTQ: {
2004		struct pf_altq		*altq;
2005
2006		VERIFY(altq_allowed);
2007		/* enable all altq interfaces on active list */
2008		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2009			if (altq->qname[0] == '\0') {
2010				error = pf_enable_altq(altq);
2011				if (error != 0)
2012					break;
2013			}
2014		}
2015		if (error == 0)
2016			pf_altq_running = 1;
2017		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2018		break;
2019	}
2020
2021	case DIOCSTOPALTQ: {
2022		struct pf_altq		*altq;
2023
2024		VERIFY(altq_allowed);
2025		/* disable all altq interfaces on active list */
2026		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2027			if (altq->qname[0] == '\0') {
2028				error = pf_disable_altq(altq);
2029				if (error != 0)
2030					break;
2031			}
2032		}
2033		if (error == 0)
2034			pf_altq_running = 0;
2035		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2036		break;
2037	}
2038
2039	case DIOCADDALTQ: {		/* struct pfioc_altq */
2040		struct pfioc_altq	*pa = (struct pfioc_altq *)(void *)addr;
2041		struct pf_altq		*altq, *a;
2042		u_int32_t		ticket;
2043
2044		VERIFY(altq_allowed);
2045		bcopy(&pa->ticket, &ticket, sizeof (ticket));
2046		if (ticket != ticket_altqs_inactive) {
2047			error = EBUSY;
2048			break;
2049		}
2050		altq = pool_get(&pf_altq_pl, PR_WAITOK);
2051		if (altq == NULL) {
2052			error = ENOMEM;
2053			break;
2054		}
2055		pf_altq_copyin(&pa->altq, altq);
2056
2057		/*
2058		 * if this is for a queue, find the discipline and
2059		 * copy the necessary fields
2060		 */
2061		if (altq->qname[0] != '\0') {
2062			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2063				error = EBUSY;
2064				pool_put(&pf_altq_pl, altq);
2065				break;
2066			}
2067			altq->altq_disc = NULL;
2068			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2069				if (strncmp(a->ifname, altq->ifname,
2070				    IFNAMSIZ) == 0 && a->qname[0] == '\0') {
2071					altq->altq_disc = a->altq_disc;
2072					break;
2073				}
2074			}
2075		}
2076
2077		error = altq_add(altq);
2078		if (error) {
2079			pool_put(&pf_altq_pl, altq);
2080			break;
2081		}
2082
2083		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2084		pf_altq_copyout(altq, &pa->altq);
2085		break;
2086	}
2087
2088	case DIOCGETALTQS: {
2089		struct pfioc_altq	*pa = (struct pfioc_altq *)(void *)addr;
2090		struct pf_altq		*altq;
2091		u_int32_t		nr;
2092
2093		VERIFY(altq_allowed);
2094		nr = 0;
2095		TAILQ_FOREACH(altq, pf_altqs_active, entries)
2096			nr++;
2097		bcopy(&nr, &pa->nr, sizeof (nr));
2098		bcopy(&ticket_altqs_active, &pa->ticket, sizeof (pa->ticket));
2099		break;
2100	}
2101
2102	case DIOCGETALTQ: {
2103		struct pfioc_altq	*pa = (struct pfioc_altq *)(void *)addr;
2104		struct pf_altq		*altq;
2105		u_int32_t		 nr, pa_nr, ticket;
2106
2107		VERIFY(altq_allowed);
2108		bcopy(&pa->ticket, &ticket, sizeof (ticket));
2109		if (ticket != ticket_altqs_active) {
2110			error = EBUSY;
2111			break;
2112		}
2113		bcopy(&pa->nr, &pa_nr, sizeof (pa_nr));
2114		nr = 0;
2115		altq = TAILQ_FIRST(pf_altqs_active);
2116		while ((altq != NULL) && (nr < pa_nr)) {
2117			altq = TAILQ_NEXT(altq, entries);
2118			nr++;
2119		}
2120		if (altq == NULL) {
2121			error = EBUSY;
2122			break;
2123		}
2124		pf_altq_copyout(altq, &pa->altq);
2125		break;
2126	}
2127
2128	case DIOCCHANGEALTQ:
2129		VERIFY(altq_allowed);
2130		/* CHANGEALTQ not supported yet! */
2131		error = ENODEV;
2132		break;
2133
2134	case DIOCGETQSTATS: {
2135		struct pfioc_qstats *pq = (struct pfioc_qstats *)(void *)addr;
2136		struct pf_altq		*altq;
2137		u_int32_t		 nr, pq_nr, ticket;
2138		int			 nbytes;
2139
2140		VERIFY(altq_allowed);
2141		bcopy(&pq->ticket, &ticket, sizeof (ticket));
2142		if (ticket != ticket_altqs_active) {
2143			error = EBUSY;
2144			break;
2145		}
2146		bcopy(&pq->nr, &pq_nr, sizeof (pq_nr));
2147		nr = 0;
2148		altq = TAILQ_FIRST(pf_altqs_active);
2149		while ((altq != NULL) && (nr < pq_nr)) {
2150			altq = TAILQ_NEXT(altq, entries);
2151			nr++;
2152		}
2153		if (altq == NULL) {
2154			error = EBUSY;
2155			break;
2156		}
2157		bcopy(&pq->nbytes, &nbytes, sizeof (nbytes));
2158		error = altq_getqstats(altq, pq->buf, &nbytes);
2159		if (error == 0) {
2160			pq->scheduler = altq->scheduler;
2161			bcopy(&nbytes, &pq->nbytes, sizeof (nbytes));
2162		}
2163		break;
2164	}
2165#endif /* PF_ALTQ */
2166
2167	case DIOCBEGINADDRS:		/* struct pfioc_pooladdr */
2168	case DIOCADDADDR:		/* struct pfioc_pooladdr */
2169	case DIOCGETADDRS:		/* struct pfioc_pooladdr */
2170	case DIOCGETADDR:		/* struct pfioc_pooladdr */
2171	case DIOCCHANGEADDR: {		/* struct pfioc_pooladdr */
2172		struct pfioc_pooladdr *pp = NULL;
2173
2174		PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
2175		error = pfioctl_ioc_pooladdr(cmd, pp, p);
2176		PFIOC_STRUCT_END(pp, addr);
2177		break;
2178	}
2179
2180	case DIOCGETRULESETS:		/* struct pfioc_ruleset */
2181	case DIOCGETRULESET: {		/* struct pfioc_ruleset */
2182		struct pfioc_ruleset *pr = NULL;
2183
2184		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
2185		error = pfioctl_ioc_ruleset(cmd, pr, p);
2186		PFIOC_STRUCT_END(pr, addr);
2187		break;
2188	}
2189
2190	case DIOCRCLRTABLES:		/* struct pfioc_table */
2191	case DIOCRADDTABLES:		/* struct pfioc_table */
2192	case DIOCRDELTABLES:		/* struct pfioc_table */
2193	case DIOCRGETTABLES:		/* struct pfioc_table */
2194	case DIOCRGETTSTATS:		/* struct pfioc_table */
2195	case DIOCRCLRTSTATS:		/* struct pfioc_table */
2196	case DIOCRSETTFLAGS:		/* struct pfioc_table */
2197	case DIOCRCLRADDRS:		/* struct pfioc_table */
2198	case DIOCRADDADDRS:		/* struct pfioc_table */
2199	case DIOCRDELADDRS:		/* struct pfioc_table */
2200	case DIOCRSETADDRS:		/* struct pfioc_table */
2201	case DIOCRGETADDRS:		/* struct pfioc_table */
2202	case DIOCRGETASTATS:		/* struct pfioc_table */
2203	case DIOCRCLRASTATS:		/* struct pfioc_table */
2204	case DIOCRTSTADDRS:		/* struct pfioc_table */
2205	case DIOCRINADEFINE: {		/* struct pfioc_table */
2206		PFIOCX_STRUCT_DECL(pfioc_table);
2207
2208		PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
2209		error = pfioctl_ioc_table(cmd,
2210		    PFIOCX_STRUCT_ADDR32(pfioc_table),
2211		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
2212		PFIOCX_STRUCT_END(pfioc_table, addr);
2213		break;
2214	}
2215
2216	case DIOCOSFPADD:		/* struct pf_osfp_ioctl */
2217	case DIOCOSFPGET: {		/* struct pf_osfp_ioctl */
2218		struct pf_osfp_ioctl *io = NULL;
2219
2220		PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
2221		if (cmd == DIOCOSFPADD) {
2222			error = pf_osfp_add(io);
2223		} else {
2224			VERIFY(cmd == DIOCOSFPGET);
2225			error = pf_osfp_get(io);
2226		}
2227		PFIOC_STRUCT_END(io, addr);
2228		break;
2229	}
2230
2231	case DIOCXBEGIN:		/* struct pfioc_trans */
2232	case DIOCXROLLBACK:		/* struct pfioc_trans */
2233	case DIOCXCOMMIT: {		/* struct pfioc_trans */
2234		PFIOCX_STRUCT_DECL(pfioc_trans);
2235
2236		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
2237		error = pfioctl_ioc_trans(cmd,
2238		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
2239		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
2240		PFIOCX_STRUCT_END(pfioc_trans, addr);
2241		break;
2242	}
2243
2244	case DIOCGETSRCNODES: {		/* struct pfioc_src_nodes */
2245		PFIOCX_STRUCT_DECL(pfioc_src_nodes);
2246
2247		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
2248		    error = ENOMEM; break;);
2249		error = pfioctl_ioc_src_nodes(cmd,
2250		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
2251		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
2252		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
2253		break;
2254	}
2255
2256	case DIOCCLRSRCNODES: {
2257		struct pf_src_node	*n;
2258		struct pf_state		*state;
2259
2260		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2261			state->src_node = NULL;
2262			state->nat_src_node = NULL;
2263		}
2264		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2265			n->expire = 1;
2266			n->states = 0;
2267		}
2268		pf_purge_expired_src_nodes();
2269		pf_status.src_nodes = 0;
2270		break;
2271	}
2272
2273	case DIOCKILLSRCNODES: {	/* struct pfioc_src_node_kill */
2274		struct pfioc_src_node_kill *psnk = NULL;
2275
2276		PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
2277		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
2278		PFIOC_STRUCT_END(psnk, addr);
2279		break;
2280	}
2281
2282	case DIOCSETHOSTID: {		/* u_int32_t */
2283		u_int32_t hid;
2284
2285		/* small enough to be on stack */
2286		bcopy(addr, &hid, sizeof (hid));
2287		if (hid == 0)
2288			pf_status.hostid = random();
2289		else
2290			pf_status.hostid = hid;
2291		break;
2292	}
2293
2294	case DIOCOSFPFLUSH:
2295		pf_osfp_flush();
2296		break;
2297
2298	case DIOCIGETIFACES:		/* struct pfioc_iface */
2299	case DIOCSETIFFLAG:		/* struct pfioc_iface */
2300	case DIOCCLRIFFLAG: {		/* struct pfioc_iface */
2301		PFIOCX_STRUCT_DECL(pfioc_iface);
2302
2303		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
2304		error = pfioctl_ioc_iface(cmd,
2305		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
2306		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
2307		PFIOCX_STRUCT_END(pfioc_iface, addr);
2308		break;
2309	}
2310
2311	default:
2312		error = ENODEV;
2313		break;
2314	}
2315
2316	lck_mtx_unlock(pf_lock);
2317	lck_rw_done(pf_perim_lock);
2318
2319	return (error);
2320}
2321
/*
 * Handler for the DIOCR* table ioctls (clear/add/delete/get tables,
 * table stats/flags, and the per-table address operations).
 *
 * Exactly one of io32/io64 is used, selected by the caller's address
 * width (proc_is64bit); the two switches below are mirror images that
 * differ only in the user structure layout they dereference.  Each case
 * first validates pfrio_esize — the caller's notion of the element
 * size — against the expected element type, which doubles as an ABI
 * compatibility check; ENODEV is returned on mismatch.  All pfr_*
 * calls are made with PFR_FLAG_USERIOCTL or'ed into the flags to mark
 * the request as user-originated.
 *
 * Returns 0 on success or an errno value.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64)
		goto struct32;

	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		/* no element buffer expected: esize must be 0 */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		/* pfrio_size is in/out: capacity on entry, count on return */
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		/* no element buffer expected: esize must be 0 */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* the pfioctl dispatcher only routes DIOCR* table cmds here */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;

struct32:
	/*
	 * 32-bit structure processing
	 *
	 * NOTE(review): keep in lockstep with the 64-bit switch above;
	 * the cases differ only in the io32/io64 structure layout.
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* the pfioctl dispatcher only routes DIOCR* table cmds here */
		VERIFY(0);
		/* NOTREACHED */
	}

done:
	return (error);
}
2685
/*
 * Handler for DIOCGETSTARTERS: copy the list of PF "starter" tokens
 * (token_list_head) out to user space, honoring the caller's buffer
 * size and reporting the bytes actually produced.
 *
 * Protocol: if the caller passes size == 0, no data is copied and the
 * total size needed for all tokens is written back instead (size
 * probe).  Otherwise at most 'size' bytes of struct pfioc_token
 * entries are copied out and 'size' is updated to the bytes written;
 * a too-small buffer yields a silently truncated list.
 *
 * Exactly one of tok32/tok64 is used, selected by the caller's
 * address width.  Returns 0, ENOENT (no tokens), or ENOMEM.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof (struct pfioc_token) * nr_tokens;
		/* ocnt remembers the caller's buffer size; cnt counts down */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe: report required size, copy nothing */
			if (p64)
				tok64->size = size;
			else
				tok32->size = size;
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* stage all entries in the kernel buffer first */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			/* unsigned compare also stops a negative cnt */
			if ((unsigned)cnt < sizeof (*tokens))
				break;    /* no more buffer space left */

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value	= entry->token.token_value;
			t->timestamp	= entry->token.timestamp;
			t->pid		= entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			cnt -= sizeof (struct pfioc_token);
		}

		/* ocnt - cnt == bytes staged; copy out in one shot */
		if (cnt < ocnt)
			error = copyout(tokens, token_buf, ocnt - cnt);

		if (p64)
			tok64->size = ocnt - cnt;
		else
			tok32->size = ocnt - cnt;

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		/* the pfioctl dispatcher only routes DIOCGETSTARTERS here */
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
2759
2760static void
2761pf_expire_states_and_src_nodes(struct pf_rule *rule)
2762{
2763	struct pf_state		*state;
2764	struct pf_src_node	*sn;
2765	int			 killed = 0;
2766
2767	/* expire the states */
2768	state = TAILQ_FIRST(&state_list);
2769	while (state) {
2770		if (state->rule.ptr == rule)
2771			state->timeout = PFTM_PURGE;
2772		state = TAILQ_NEXT(state, entry_list);
2773	}
2774	pf_purge_expired_states(pf_status.states);
2775
2776	/* expire the src_nodes */
2777	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2778		if (sn->rule.ptr != rule)
2779			continue;
2780		if (sn->states != 0) {
2781			RB_FOREACH(state, pf_state_tree_id,
2782			    &tree_id) {
2783				if (state->src_node == sn)
2784					state->src_node = NULL;
2785				if (state->nat_src_node == sn)
2786					state->nat_src_node = NULL;
2787			}
2788			sn->states = 0;
2789		}
2790		sn->expire = 1;
2791		killed++;
2792	}
2793	if (killed)
2794		pf_purge_expired_src_nodes();
2795}
2796
2797static void
2798pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2799    struct pf_rule *rule)
2800{
2801	struct pf_rule *r;
2802	int nr = 0;
2803
2804	pf_expire_states_and_src_nodes(rule);
2805
2806	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2807	if (ruleset->rules[rs_num].active.rcount-- == 0)
2808		panic("%s: rcount value broken!", __func__);
2809	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2810
2811	while (r) {
2812		r->nr = nr++;
2813		r = TAILQ_NEXT(r, entries);
2814	}
2815}
2816
2817
2818static void
2819pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2820{
2821	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2822	ruleset->rules[rs].active.ticket =
2823	    ++ruleset->rules[rs].inactive.ticket;
2824}
2825
2826/*
2827 * req_dev encodes the PF interface. Currently, possible values are
2828 * 0 or PFRULE_PFM
2829 */
2830static int
2831pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
2832{
2833	struct pf_ruleset	*ruleset;
2834	struct pf_rule		*rule = NULL;
2835	int			 is_anchor;
2836	int			 error;
2837	int			 i;
2838
2839	is_anchor = (pr->anchor_call[0] != '\0');
2840	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
2841	    pr->rule.owner, is_anchor, &error)) == NULL)
2842		return (error);
2843
2844	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
2845		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2846		while (rule && (rule->ticket != pr->rule.ticket))
2847			rule = TAILQ_NEXT(rule, entries);
2848	}
2849	if (rule == NULL)
2850		return (ENOENT);
2851	else
2852		i--;
2853
2854	if (strcmp(rule->owner, pr->rule.owner))
2855		return (EACCES);
2856
2857delete_rule:
2858	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
2859	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
2860	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
2861		/* set rule & ruleset to parent and repeat */
2862		struct pf_rule *delete_rule = rule;
2863		struct pf_ruleset *delete_ruleset = ruleset;
2864
2865#define	parent_ruleset		ruleset->anchor->parent->ruleset
2866		if (ruleset->anchor->parent == NULL)
2867			ruleset = &pf_main_ruleset;
2868		else
2869			ruleset = &parent_ruleset;
2870
2871		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2872		while (rule &&
2873		    (rule->anchor != delete_ruleset->anchor))
2874			rule = TAILQ_NEXT(rule, entries);
2875		if (rule == NULL)
2876			panic("%s: rule not found!", __func__);
2877
2878		/*
2879		 * if reqest device != rule's device, bail :
2880		 * with error if ticket matches;
2881		 * without error if ticket doesn't match (i.e. its just cleanup)
2882		 */
2883		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2884			if (rule->ticket != pr->rule.ticket) {
2885				return (0);
2886			} else {
2887				return EACCES;
2888			}
2889		}
2890
2891		if (delete_rule->rule_flag & PFRULE_PFM) {
2892			pffwrules--;
2893		}
2894
2895		pf_delete_rule_from_ruleset(delete_ruleset,
2896		    i, delete_rule);
2897		delete_ruleset->rules[i].active.ticket =
2898		    ++delete_ruleset->rules[i].inactive.ticket;
2899		goto delete_rule;
2900	} else {
2901		/*
2902		 * process deleting rule only if device that added the
2903		 * rule matches device that issued the request
2904		 */
2905		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
2906			return EACCES;
2907		if (rule->rule_flag & PFRULE_PFM)
2908			pffwrules--;
2909		pf_delete_rule_from_ruleset(ruleset, i,
2910		    rule);
2911		pf_ruleset_cleanup(ruleset, i);
2912	}
2913
2914	return (0);
2915}
2916
2917/*
2918 * req_dev encodes the PF interface. Currently, possible values are
2919 * 0 or PFRULE_PFM
2920 */
2921static void
2922pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
2923{
2924	struct pf_ruleset	*ruleset;
2925	struct pf_rule		*rule, *next;
2926	int			 deleted = 0;
2927
2928	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
2929		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
2930		ruleset = &pf_main_ruleset;
2931		while (rule) {
2932			next = TAILQ_NEXT(rule, entries);
2933			/*
2934			 * process deleting rule only if device that added the
2935			 * rule matches device that issued the request
2936			 */
2937			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2938				rule = next;
2939				continue;
2940			}
2941			if (rule->anchor) {
2942				if (((strcmp(rule->owner, owner)) == 0) ||
2943				    ((strcmp(rule->owner, "")) == 0)) {
2944					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2945						if (deleted) {
2946							pf_ruleset_cleanup(ruleset, rs);
2947							deleted = 0;
2948						}
2949						/* step into anchor */
2950						ruleset =
2951						    &rule->anchor->ruleset;
2952						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2953						continue;
2954					} else {
2955						if (rule->rule_flag &
2956						    PFRULE_PFM)
2957							pffwrules--;
2958						pf_delete_rule_from_ruleset(ruleset, rs, rule);
2959						deleted = 1;
2960						rule = next;
2961					}
2962				} else
2963					rule = next;
2964			} else {
2965				if (((strcmp(rule->owner, owner)) == 0)) {
2966					/* delete rule */
2967					if (rule->rule_flag & PFRULE_PFM)
2968						pffwrules--;
2969					pf_delete_rule_from_ruleset(ruleset,
2970					    rs, rule);
2971					deleted = 1;
2972				}
2973				rule = next;
2974			}
2975			if (rule == NULL) {
2976				if (deleted) {
2977					pf_ruleset_cleanup(ruleset, rs);
2978					deleted = 0;
2979				}
2980				if (ruleset != &pf_main_ruleset)
2981					pf_deleterule_anchor_step_out(&ruleset,
2982					    rs, &rule);
2983			}
2984		}
2985	}
2986}
2987
2988static void
2989pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2990    int rs, struct pf_rule **rule_ptr)
2991{
2992	struct pf_ruleset *ruleset = *ruleset_ptr;
2993	struct pf_rule *rule = *rule_ptr;
2994
2995	/* step out of anchor */
2996	struct pf_ruleset *rs_copy = ruleset;
2997	ruleset = ruleset->anchor->parent?
2998	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2999
3000	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
3001	while (rule && (rule->anchor != rs_copy->anchor))
3002		rule = TAILQ_NEXT(rule, entries);
3003	if (rule == NULL)
3004		panic("%s: parent rule of anchor not found!", __func__);
3005	if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
3006		rule = TAILQ_NEXT(rule, entries);
3007
3008	*ruleset_ptr = ruleset;
3009	*rule_ptr = rule;
3010}
3011
3012static void
3013pf_addrwrap_setup(struct pf_addr_wrap *aw)
3014{
3015	VERIFY(aw);
3016	bzero(&aw->p, sizeof aw->p);
3017}
3018
/*
 * Resolve and validate everything a freshly copied-in rule references:
 * interface, ALTQ queues, tags, route labels, dynamic/table addresses,
 * anchor, address pool, and overload table.
 *
 * Errors are accumulated in 'error' (later checks overwrite earlier
 * codes) so that all reference setup runs before the single cleanup
 * point.  On failure the rule is released: via pool_put() if only the
 * kif lookup failed, otherwise via pf_rm_rule() which also drops the
 * references taken so far.  The pool address buffer pf_pabuf is moved
 * into the rule's rpool on the way through.
 *
 * Returns 0 on success or EINVAL/EBUSY, with 'rule' freed on error.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
	struct pf_pooladdr 	*apa;
	int			 error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing referenced yet: plain pool release */
			pool_put(&pf_rule_pl, rule);
			return (EINVAL);
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
#if PF_ALTQ
	/* set queue IDs */
	if (altq_allowed && rule->qname[0] != '\0') {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != '\0') {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif /* PF_ALTQ */
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route-to et al. make no sense without a direction */
	if (rule->rt && !rule->direction)
		error = EINVAL;
#if PFLOG
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr))
		error = EBUSY;
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
		error = EINVAL;
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
		error = EINVAL;
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
		error = EINVAL;
	/* resolve table addresses in the staged pool buffer too */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
		if (pf_tbladdr_setup(ruleset, &apa->addr))
			error = EINVAL;

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* adopt the staged pool; translation rules require a non-empty one */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		/* pf_rm_rule(NULL, ...) drops refs and frees the rule */
		pf_rm_rule(NULL, rule);
		return (error);
	}
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return (0);
}
3106
3107static int
3108pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
3109{
3110	int error = 0;
3111	u_int32_t req_dev = 0;
3112
3113	switch (cmd) {
3114	case DIOCADDRULE: {
3115		struct pf_ruleset	*ruleset;
3116		struct pf_rule		*rule, *tail;
3117		int			rs_num;
3118
3119		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3120		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3121		ruleset = pf_find_ruleset(pr->anchor);
3122		if (ruleset == NULL) {
3123			error = EINVAL;
3124			break;
3125		}
3126		rs_num = pf_get_ruleset_number(pr->rule.action);
3127		if (rs_num >= PF_RULESET_MAX) {
3128			error = EINVAL;
3129			break;
3130		}
3131		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3132			error = EINVAL;
3133			break;
3134		}
3135		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
3136			error = EBUSY;
3137			break;
3138		}
3139		if (pr->pool_ticket != ticket_pabuf) {
3140			error = EBUSY;
3141			break;
3142		}
3143		rule = pool_get(&pf_rule_pl, PR_WAITOK);
3144		if (rule == NULL) {
3145			error = ENOMEM;
3146			break;
3147		}
3148		pf_rule_copyin(&pr->rule, rule, p, minordev);
3149#if !INET
3150		if (rule->af == AF_INET) {
3151			pool_put(&pf_rule_pl, rule);
3152			error = EAFNOSUPPORT;
3153			break;
3154		}
3155#endif /* INET */
3156#if !INET6
3157		if (rule->af == AF_INET6) {
3158			pool_put(&pf_rule_pl, rule);
3159			error = EAFNOSUPPORT;
3160			break;
3161		}
3162#endif /* INET6 */
3163		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
3164		    pf_rulequeue);
3165		if (tail)
3166			rule->nr = tail->nr + 1;
3167		else
3168			rule->nr = 0;
3169
3170		if ((error = pf_rule_setup(pr, rule, ruleset)))
3171			break;
3172
3173		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
3174		    rule, entries);
3175		ruleset->rules[rs_num].inactive.rcount++;
3176		if (rule->rule_flag & PFRULE_PFM)
3177			pffwrules++;
3178		break;
3179	}
3180
3181	case DIOCGETRULES: {
3182		struct pf_ruleset	*ruleset;
3183		struct pf_rule		*tail;
3184		int			 rs_num;
3185
3186		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3187		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3188		ruleset = pf_find_ruleset(pr->anchor);
3189		if (ruleset == NULL) {
3190			error = EINVAL;
3191			break;
3192		}
3193		rs_num = pf_get_ruleset_number(pr->rule.action);
3194		if (rs_num >= PF_RULESET_MAX) {
3195			error = EINVAL;
3196			break;
3197		}
3198		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3199		    pf_rulequeue);
3200		if (tail)
3201			pr->nr = tail->nr + 1;
3202		else
3203			pr->nr = 0;
3204		pr->ticket = ruleset->rules[rs_num].active.ticket;
3205		break;
3206	}
3207
	case DIOCGETRULE: {
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		/*
		 * Copy the active rule numbered pr->nr out to userland.
		 * The caller must present the ticket obtained via
		 * DIOCGETRULES; a stale ticket means the ruleset changed
		 * underneath it and we return EBUSY.
		 */
		/* defensively NUL-terminate strings copied from userland */
		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		/* linear scan for the rule with the requested number */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		pf_rule_copyout(rule, &pr->rule);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		/*
		 * Resolve kernel-internal address representations
		 * (dynamic interface addresses, tables, route labels)
		 * into their user-visible forms inside pr->rule.
		 */
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		/* translate skip-step pointers into rule numbers (-1 = none) */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		/* optionally reset the rule's counters after reading them */
		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
3261
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = pr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		struct pf_pooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		/*
		 * Modify the active ruleset in place: add a rule at the
		 * head/tail/before/after a given rule, remove a rule, or
		 * just hand out a change ticket (PF_CHANGE_GET_TICKET).
		 */
		/* adding a rule consumes the staged address pool buffer */
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		/* defensively NUL-terminate strings copied from userland */
		pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
		pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			/* bumping the ticket invalidates older tickets */
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			/* high byte of return_icmp is the ICMP type */
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			/*
			 * Build and validate the replacement rule.  Setup
			 * failures below accumulate into `error'; the rule
			 * is torn down in one place once all setup steps
			 * have run.
			 */
			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
#if !INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			/* bind the rule to its interface, if one was named */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#if PF_ALTQ
			/* set queue IDs */
			if (altq_allowed && newrule->qname[0] != '\0') {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != '\0') {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* PF_ALTQ */
			/* resolve tag names to tag ids */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			/* route-to et al. require an explicit direction */
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if PFLOG
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif /* PFLOG */
			/* set up src/dst address wrappers and backing state */
			pf_addrwrap_setup(&newrule->src.addr);
			pf_addrwrap_setup(&newrule->dst.addr);
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			/* table addresses staged in the pool buffer too */
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			/* take ownership of the staged address pool */
			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			/*
			 * Translation/routing rules (except anchors) must
			 * carry at least one pool address.
			 */
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			/* single teardown point for all setup failures above */
			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		/* locate the reference rule for the requested action */
		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		/* renumber the whole list so nr stays dense and ordered */
		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		/* invalidate outstanding tickets */
		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}
3470
	case DIOCINSERTRULE: {
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail, *r;
		int			rs_num;
		int			is_anchor;

		/*
		 * Insert a single rule directly into the ACTIVE ruleset
		 * (no inactive/commit transaction), positioned by its
		 * priority field.  Used by the PFM (firewall-manager)
		 * interface; the returned ticket identifies the rule for
		 * later DIOCDELETERULE by ticket.
		 */
		/* defensively NUL-terminate strings copied from userland */
		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
		is_anchor = (pr->anchor_call[0] != '\0');

		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
		    pr->rule.owner, is_anchor, &error)) == NULL)
			break;

		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* high byte of return_icmp is the ICMP type */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* make sure this anchor rule doesn't exist already */
		if (is_anchor) {
			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
			while (r) {
				if (r->anchor &&
				    ((strcmp(r->anchor->name,
				    pr->anchor_call)) == 0)) {
					/*
					 * Same owner (or unowned): duplicate;
					 * different owner: not allowed.
					 */
					if (((strcmp(pr->rule.owner,
					    r->owner)) == 0) ||
					    ((strcmp(r->owner, "")) == 0))
						error = EEXIST;
					else
						error = EPERM;
					break;
				}
				r = TAILQ_NEXT(r, entries);
			}
			/*
			 * NOTE(review): this returns directly instead of
			 * `break' like the other error paths in this case —
			 * confirm no common cleanup below the switch is
			 * being skipped.
			 */
			if (error != 0)
				return (error);
		}

		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}

#endif /* INET6 */
		/*
		 * Find the insertion point: the first rule with a strictly
		 * higher priority value.  r == NULL means append at tail.
		 */
		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
			r = TAILQ_NEXT(r, entries);
		if (r == NULL) {
			if ((tail =
			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue)) != NULL)
				rule->nr = tail->nr + 1;
			else
				rule->nr = 0;
		} else {
			/* new rule takes over r's number; r's tail shifts up */
			rule->nr = r->nr;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset)))
			break;

		if (rule->anchor != NULL)
			strncpy(rule->anchor->owner, rule->owner,
			    PF_OWNER_NAME_SIZE);

		if (r) {
			TAILQ_INSERT_BEFORE(r, rule, entries);
			/*
			 * Renumber everything after the insertion point.
			 * `++r->nr' is used as the loop condition: it is
			 * nonzero (true) unless the counter wraps to 0, so
			 * this normally walks the whole tail.
			 */
			while (r && ++r->nr)
				r = TAILQ_NEXT(r, entries);
		} else
			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
			    rule, entries);
		ruleset->rules[rs_num].active.rcount++;

		/* Calculate checksum for the main ruleset */
		if (ruleset == &pf_main_ruleset)
			error = pf_setup_pfsync_matching(ruleset);

		pf_ruleset_cleanup(ruleset, rs_num);
		/* ticket is a permuted kernel address: unique per rule */
		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);

		pr->rule.ticket = rule->ticket;
		pf_rule_copyout(rule, &pr->rule);
		if (rule->rule_flag & PFRULE_PFM)
			pffwrules++;
		break;
	}
3580
	case DIOCDELETERULE: {
		/*
		 * Delete rule(s) from the active ruleset: by the ticket
		 * issued at DIOCINSERTRULE time if one is given, otherwise
		 * every rule belonging to pr->rule.owner.
		 */
		/* defensively NUL-terminate strings copied from userland */
		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';

		/* high byte of return_icmp is the ICMP type */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* get device through which request is made */
		if ((uint8_t)minordev == PFDEV_PFM)
			req_dev |= PFRULE_PFM;

		if (pr->rule.ticket) {
			if ((error = pf_delete_rule_by_ticket(pr, req_dev)))
				break;
		} else
			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
		/* report the remaining count of PFM-created rules */
		pr->nr = pffwrules;
		break;
	}
3602
3603	default:
3604		VERIFY(0);
3605		/* NOTREACHED */
3606	}
3607
3608	return (error);
3609}
3610
/*
 * Handle the state-killing ioctls.
 *
 * DIOCCLRSTATES: unlink every state, optionally restricted to states on
 * the interface named in psk->psk_ifname.  DIOCKILLSTATES: unlink only
 * states matching the full af/proto/src/dst/port filter in *psk.  Both
 * return the number of states killed in psk->psk_af (field reused as an
 * output counter).  Always returns 0; `cmd' must be one of the two
 * ioctls (VERIFY otherwise).
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		int			 killed = 0;

		/* defensively NUL-terminate the userland-supplied name */
		psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
		/* fetch the successor first: pf_unlink_state removes s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/* empty ifname matches every interface */
			if (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
			    s->kif->pfik_name) == 0) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the killed-count on the way out */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		int			 killed = 0;

		/* fetch the successor first: pf_unlink_state removes s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			/*
			 * Pick which side of the state key corresponds to
			 * the caller's notion of src/dst.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			/* zero af/proto/ifname act as wildcards */
			if ((!psk->psk_af || sk->af == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
			    s->kif->pfik_name) == 0)) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the killed-count on the way out */
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3699
/*
 * Handle single-state ioctls.
 *
 * DIOCADDSTATE: import a pfsync-format state from userland and insert it
 * into the state table.  DIOCGETSTATE: export the state identified by
 * (id, creatorid) back to userland.  Returns 0 or an errno; `cmd' must
 * be one of the two ioctls (VERIFY otherwise).
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state	*sp = &ps->state;
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pfi_kif		*kif;

		/* reject timeouts outside the known set */
		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only `s' is returned to its pool
			 * here, unlike the kif==NULL path above which also
			 * frees `sk' — presumably pf_insert_state disposes
			 * of the state key on failure; confirm.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		/* look up by the (id, creatorid) pair supplied by userland */
		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3774
/*
 * Handle DIOCGETSTATES: bulk-export all states to a userland buffer.
 *
 * The caller passes either the 32-bit or 64-bit layout of struct
 * pfioc_states depending on its address-space size; `p' selects which
 * one is live.  A zero ps_len is a size probe: we report the space
 * needed for all current states and copy nothing.  Otherwise states are
 * copied out one at a time until the buffer is full, and ps_len is set
 * to the number of bytes actually written.  Returns 0 or an errno.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {		/* struct pfioc_states */
		struct pf_state		*state;
		struct pfsync_state	*pstore;
		user_addr_t		 buf;
		u_int32_t		 nr = 0;
		int			 len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size probe: report required space, copy nothing */
			size = sizeof (struct pfsync_state) * pf_status.states;
			if (p64)
				ps64->ps_len = size;
			else
				ps32->ps_len = size;
			break;
		}

		/* single staging record reused for every copyout */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already torn down but not yet freed */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop when the next record would overflow */
				if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof (*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof (*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report the number of bytes actually written */
		size = sizeof (struct pfsync_state) * nr;
		if (p64)
			ps64->ps_len = size;
		else
			ps32->ps_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
3843
/*
 * Handle DIOCNATLOOK: given the af/proto/src/dst of a connection, find
 * the matching state and report the translated (NAT) addresses/ports in
 * pnl->rsaddr/rsxport/rdaddr/rdxport.  Returns 0 on success, EINVAL for
 * an under-specified query, E2BIG if more than one state matches, or
 * ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/* proto, both addresses, and (for TCP/UDP) both ports
		 * are mandatory */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->dxport,
				    sizeof (key.ext.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof (key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof (key.lan.xport));
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->sxport,
				    sizeof (key.ext.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated endpoint; the other
				 * endpoint is echoed back unchanged.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof (pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof (pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof (pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof (pnl->rsxport));
				}
			} else
				error = ENOENT;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3928
3929static int
3930pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3931{
3932#pragma unused(p)
3933	int error = 0;
3934
3935	switch (cmd) {
3936	case DIOCSETTIMEOUT: {
3937		int old;
3938
3939		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3940		    pt->seconds < 0) {
3941			error = EINVAL;
3942			goto fail;
3943		}
3944		old = pf_default_rule.timeout[pt->timeout];
3945		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3946			pt->seconds = 1;
3947		pf_default_rule.timeout[pt->timeout] = pt->seconds;
3948		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3949			wakeup(pf_purge_thread_fn);
3950		pt->seconds = old;
3951		break;
3952	}
3953
3954	case DIOCGETTIMEOUT: {
3955		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3956			error = EINVAL;
3957			goto fail;
3958		}
3959		pt->seconds = pf_default_rule.timeout[pt->timeout];
3960		break;
3961	}
3962
3963	default:
3964		VERIFY(0);
3965		/* NOTREACHED */
3966	}
3967fail:
3968	return (error);
3969}
3970
3971static int
3972pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3973{
3974#pragma unused(p)
3975	int error = 0;
3976
3977	switch (cmd) {
3978	case DIOCGETLIMIT: {
3979
3980		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3981			error = EINVAL;
3982			goto fail;
3983		}
3984		pl->limit = pf_pool_limits[pl->index].limit;
3985		break;
3986	}
3987
3988	case DIOCSETLIMIT: {
3989		int old_limit;
3990
3991		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3992		    pf_pool_limits[pl->index].pp == NULL) {
3993			error = EINVAL;
3994			goto fail;
3995		}
3996		pool_sethardlimit(pf_pool_limits[pl->index].pp,
3997		    pl->limit, NULL, 0);
3998		old_limit = pf_pool_limits[pl->index].limit;
3999		pf_pool_limits[pl->index].limit = pl->limit;
4000		pl->limit = old_limit;
4001		break;
4002	}
4003
4004	default:
4005		VERIFY(0);
4006		/* NOTREACHED */
4007	}
4008fail:
4009	return (error);
4010}
4011
4012static int
4013pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
4014{
4015#pragma unused(p)
4016	struct pf_pooladdr *pa = NULL;
4017	struct pf_pool *pool = NULL;
4018	int error = 0;
4019
4020	switch (cmd) {
4021	case DIOCBEGINADDRS: {
4022		pf_empty_pool(&pf_pabuf);
4023		pp->ticket = ++ticket_pabuf;
4024		break;
4025	}
4026
4027	case DIOCADDADDR: {
4028		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
4029		if (pp->ticket != ticket_pabuf) {
4030			error = EBUSY;
4031			break;
4032		}
4033#if !INET
4034		if (pp->af == AF_INET) {
4035			error = EAFNOSUPPORT;
4036			break;
4037		}
4038#endif /* INET */
4039#if !INET6
4040		if (pp->af == AF_INET6) {
4041			error = EAFNOSUPPORT;
4042			break;
4043		}
4044#endif /* INET6 */
4045		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4046		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4047		    pp->addr.addr.type != PF_ADDR_TABLE) {
4048			error = EINVAL;
4049			break;
4050		}
4051		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
4052		if (pa == NULL) {
4053			error = ENOMEM;
4054			break;
4055		}
4056		pf_pooladdr_copyin(&pp->addr, pa);
4057		if (pa->ifname[0]) {
4058			pa->kif = pfi_kif_get(pa->ifname);
4059			if (pa->kif == NULL) {
4060				pool_put(&pf_pooladdr_pl, pa);
4061				error = EINVAL;
4062				break;
4063			}
4064			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
4065		}
4066		pf_addrwrap_setup(&pa->addr);
4067		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
4068			pfi_dynaddr_remove(&pa->addr);
4069			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
4070			pool_put(&pf_pooladdr_pl, pa);
4071			error = EINVAL;
4072			break;
4073		}
4074		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
4075		break;
4076	}
4077
4078	case DIOCGETADDRS: {
4079		pp->nr = 0;
4080		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
4081		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
4082		    pp->r_num, 0, 1, 0);
4083		if (pool == NULL) {
4084			error = EBUSY;
4085			break;
4086		}
4087		TAILQ_FOREACH(pa, &pool->list, entries)
4088			pp->nr++;
4089		break;
4090	}
4091
4092	case DIOCGETADDR: {
4093		u_int32_t		 nr = 0;
4094
4095		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
4096		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
4097		    pp->r_num, 0, 1, 1);
4098		if (pool == NULL) {
4099			error = EBUSY;
4100			break;
4101		}
4102		pa = TAILQ_FIRST(&pool->list);
4103		while ((pa != NULL) && (nr < pp->nr)) {
4104			pa = TAILQ_NEXT(pa, entries);
4105			nr++;
4106		}
4107		if (pa == NULL) {
4108			error = EBUSY;
4109			break;
4110		}
4111		pf_pooladdr_copyout(pa, &pp->addr);
4112		pfi_dynaddr_copyout(&pp->addr.addr);
4113		pf_tbladdr_copyout(&pp->addr.addr);
4114		pf_rtlabel_copyout(&pp->addr.addr);
4115		break;
4116	}
4117
4118	case DIOCCHANGEADDR: {
4119		struct pfioc_pooladdr	*pca = pp;
4120		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
4121		struct pf_ruleset	*ruleset;
4122
4123		if (pca->action < PF_CHANGE_ADD_HEAD ||
4124		    pca->action > PF_CHANGE_REMOVE) {
4125			error = EINVAL;
4126			break;
4127		}
4128		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4129		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4130		    pca->addr.addr.type != PF_ADDR_TABLE) {
4131			error = EINVAL;
4132			break;
4133		}
4134
4135		pca->anchor[sizeof (pca->anchor) - 1] = '\0';
4136		ruleset = pf_find_ruleset(pca->anchor);
4137		if (ruleset == NULL) {
4138			error = EBUSY;
4139			break;
4140		}
4141		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
4142		    pca->r_num, pca->r_last, 1, 1);
4143		if (pool == NULL) {
4144			error = EBUSY;
4145			break;
4146		}
4147		if (pca->action != PF_CHANGE_REMOVE) {
4148			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
4149			if (newpa == NULL) {
4150				error = ENOMEM;
4151				break;
4152			}
4153			pf_pooladdr_copyin(&pca->addr, newpa);
4154#if !INET
4155			if (pca->af == AF_INET) {
4156				pool_put(&pf_pooladdr_pl, newpa);
4157				error = EAFNOSUPPORT;
4158				break;
4159			}
4160#endif /* INET */
4161#if !INET6
4162			if (pca->af == AF_INET6) {
4163				pool_put(&pf_pooladdr_pl, newpa);
4164				error = EAFNOSUPPORT;
4165				break;
4166			}
4167#endif /* INET6 */
4168			if (newpa->ifname[0]) {
4169				newpa->kif = pfi_kif_get(newpa->ifname);
4170				if (newpa->kif == NULL) {
4171					pool_put(&pf_pooladdr_pl, newpa);
4172					error = EINVAL;
4173					break;
4174				}
4175				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
4176			} else
4177				newpa->kif = NULL;
4178			pf_addrwrap_setup(&newpa->addr);
4179			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
4180			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
4181				pfi_dynaddr_remove(&newpa->addr);
4182				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
4183				pool_put(&pf_pooladdr_pl, newpa);
4184				error = EINVAL;
4185				break;
4186			}
4187		}
4188
4189		if (pca->action == PF_CHANGE_ADD_HEAD)
4190			oldpa = TAILQ_FIRST(&pool->list);
4191		else if (pca->action == PF_CHANGE_ADD_TAIL)
4192			oldpa = TAILQ_LAST(&pool->list, pf_palist);
4193		else {
4194			int	i = 0;
4195
4196			oldpa = TAILQ_FIRST(&pool->list);
4197			while ((oldpa != NULL) && (i < (int)pca->nr)) {
4198				oldpa = TAILQ_NEXT(oldpa, entries);
4199				i++;
4200			}
4201			if (oldpa == NULL) {
4202				error = EINVAL;
4203				break;
4204			}
4205		}
4206
4207		if (pca->action == PF_CHANGE_REMOVE) {
4208			TAILQ_REMOVE(&pool->list, oldpa, entries);
4209			pfi_dynaddr_remove(&oldpa->addr);
4210			pf_tbladdr_remove(&oldpa->addr);
4211			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
4212			pool_put(&pf_pooladdr_pl, oldpa);
4213		} else {
4214			if (oldpa == NULL)
4215				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4216			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4217			    pca->action == PF_CHANGE_ADD_BEFORE)
4218				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4219			else
4220				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4221				    newpa, entries);
4222		}
4223
4224		pool->cur = TAILQ_FIRST(&pool->list);
4225		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
4226		    pca->af);
4227		break;
4228	}
4229
4230	default:
4231		VERIFY(0);
4232		/* NOTREACHED */
4233	}
4234
4235	return (error);
4236}
4237
4238static int
4239pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4240{
4241#pragma unused(p)
4242	int error = 0;
4243
4244	switch (cmd) {
4245	case DIOCGETRULESETS: {
4246		struct pf_ruleset	*ruleset;
4247		struct pf_anchor	*anchor;
4248
4249		pr->path[sizeof (pr->path) - 1] = '\0';
4250		pr->name[sizeof (pr->name) - 1] = '\0';
4251		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4252			error = EINVAL;
4253			break;
4254		}
4255		pr->nr = 0;
4256		if (ruleset->anchor == NULL) {
4257			/* XXX kludge for pf_main_ruleset */
4258			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4259				if (anchor->parent == NULL)
4260					pr->nr++;
4261		} else {
4262			RB_FOREACH(anchor, pf_anchor_node,
4263			    &ruleset->anchor->children)
4264				pr->nr++;
4265		}
4266		break;
4267	}
4268
4269	case DIOCGETRULESET: {
4270		struct pf_ruleset	*ruleset;
4271		struct pf_anchor	*anchor;
4272		u_int32_t		 nr = 0;
4273
4274		pr->path[sizeof (pr->path) - 1] = '\0';
4275		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4276			error = EINVAL;
4277			break;
4278		}
4279		pr->name[0] = 0;
4280		if (ruleset->anchor == NULL) {
4281			/* XXX kludge for pf_main_ruleset */
4282			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4283				if (anchor->parent == NULL && nr++ == pr->nr) {
4284					strlcpy(pr->name, anchor->name,
4285					    sizeof (pr->name));
4286					break;
4287				}
4288		} else {
4289			RB_FOREACH(anchor, pf_anchor_node,
4290			    &ruleset->anchor->children)
4291				if (nr++ == pr->nr) {
4292					strlcpy(pr->name, anchor->name,
4293					    sizeof (pr->name));
4294					break;
4295				}
4296		}
4297		if (!pr->name[0])
4298			error = EBUSY;
4299		break;
4300	}
4301
4302	default:
4303		VERIFY(0);
4304		/* NOTREACHED */
4305	}
4306
4307	return (error);
4308}
4309
4310static int
4311pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
4312    struct pfioc_trans_64 *io64, struct proc *p)
4313{
4314	int p64 = proc_is64bit(p);
4315	int error = 0, esize, size;
4316	user_addr_t buf;
4317
4318	esize = (p64 ? io64->esize : io32->esize);
4319	size = (p64 ? io64->size : io32->size);
4320	buf = (p64 ? io64->array : io32->array);
4321
4322	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		/*
		 * Begin a multi-ruleset transaction: for each element of
		 * the userland array, open an inactive copy of the named
		 * ruleset/table/altq and write the issued ticket back.
		 */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		/* scratch records reused for every array element */
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq has no per-anchor rulesets */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_begin_altq(&ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail;
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* hand the new ticket back to userland */
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}
4392
	case DIOCXROLLBACK: {
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		/*
		 * Abort a transaction started with DIOCXBEGIN: discard the
		 * inactive ruleset/table/altq copy for every element of
		 * the userland array, using the tickets issued at begin.
		 */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		/* scratch records reused for every array element */
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq has no per-anchor rulesets */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_rollback_altq(ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail; /* really bad */
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}
4456
4457	case DIOCXCOMMIT: {
4458		struct pfioc_trans_e	*ioe;
4459		struct pfr_table	*table;
4460		struct pf_ruleset	*rs;
4461		user_addr_t		 _buf = buf;
4462		int			 i;
4463
4464		if (esize != sizeof (*ioe)) {
4465			error = ENODEV;
4466			goto fail;
4467		}
4468		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
4469		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
4470		/* first makes sure everything will succeed */
4471		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
4472			if (copyin(buf, ioe, sizeof (*ioe))) {
4473				_FREE(table, M_TEMP);
4474				_FREE(ioe, M_TEMP);
4475				error = EFAULT;
4476				goto fail;
4477			}
4478			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
4479			switch (ioe->rs_num) {
4480			case PF_RULESET_ALTQ:
4481#if PF_ALTQ
4482				if (altq_allowed) {
4483					if (ioe->anchor[0]) {
4484						_FREE(table, M_TEMP);
4485						_FREE(ioe, M_TEMP);
4486						error = EINVAL;
4487						goto fail;
4488					}
4489					if (!altqs_inactive_open ||
4490					    ioe->ticket !=
4491					    ticket_altqs_inactive) {
4492						_FREE(table, M_TEMP);
4493						_FREE(ioe, M_TEMP);
4494						error = EBUSY;
4495						goto fail;
4496					}
4497				}
4498#endif /* PF_ALTQ */
4499				break;
4500			case PF_RULESET_TABLE:
4501				rs = pf_find_ruleset(ioe->anchor);
4502				if (rs == NULL || !rs->topen || ioe->ticket !=
4503				    rs->tticket) {
4504					_FREE(table, M_TEMP);
4505					_FREE(ioe, M_TEMP);
4506					error = EBUSY;
4507					goto fail;
4508				}
4509				break;
4510			default:
4511				if (ioe->rs_num < 0 || ioe->rs_num >=
4512				    PF_RULESET_MAX) {
4513					_FREE(table, M_TEMP);
4514					_FREE(ioe, M_TEMP);
4515					error = EINVAL;
4516					goto fail;
4517				}
4518				rs = pf_find_ruleset(ioe->anchor);
4519				if (rs == NULL ||
4520				    !rs->rules[ioe->rs_num].inactive.open ||
4521				    rs->rules[ioe->rs_num].inactive.ticket !=
4522				    ioe->ticket) {
4523					_FREE(table, M_TEMP);
4524					_FREE(ioe, M_TEMP);
4525					error = EBUSY;
4526					goto fail;
4527				}
4528				break;
4529			}
4530		}
4531		buf = _buf;
4532		/* now do the commit - no errors should happen here */
4533		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
4534			if (copyin(buf, ioe, sizeof (*ioe))) {
4535				_FREE(table, M_TEMP);
4536				_FREE(ioe, M_TEMP);
4537				error = EFAULT;
4538				goto fail;
4539			}
4540			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
4541			switch (ioe->rs_num) {
4542			case PF_RULESET_ALTQ:
4543#if PF_ALTQ
4544				if (altq_allowed &&
4545				    (error = pf_commit_altq(ioe->ticket))) {
4546					_FREE(table, M_TEMP);
4547					_FREE(ioe, M_TEMP);
4548					goto fail; /* really bad */
4549				}
4550#endif /* PF_ALTQ */
4551				break;
4552			case PF_RULESET_TABLE:
4553				bzero(table, sizeof (*table));
4554				strlcpy(table->pfrt_anchor, ioe->anchor,
4555				    sizeof (table->pfrt_anchor));
4556				if ((error = pfr_ina_commit(table, ioe->ticket,
4557				    NULL, NULL, 0))) {
4558					_FREE(table, M_TEMP);
4559					_FREE(ioe, M_TEMP);
4560					goto fail; /* really bad */
4561				}
4562				break;
4563			default:
4564				if ((error = pf_commit_rules(ioe->ticket,
4565				    ioe->rs_num, ioe->anchor))) {
4566					_FREE(table, M_TEMP);
4567					_FREE(ioe, M_TEMP);
4568					goto fail; /* really bad */
4569				}
4570				break;
4571			}
4572		}
4573		_FREE(table, M_TEMP);
4574		_FREE(ioe, M_TEMP);
4575		break;
4576	}
4577
4578	default:
4579		VERIFY(0);
4580		/* NOTREACHED */
4581	}
4582fail:
4583	return (error);
4584}
4585
/*
 * Handler for the DIOCGETSRCNODES ioctl: export the PF source-tracking
 * node tree to user space.  psn32/psn64 are the 32-bit and 64-bit user
 * views of the same request; only the one matching the calling
 * process's ABI is read or written.  Returns 0 or an errno value.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);	/* selects the 64- vs 32-bit view */
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node	*n, *pstore;
		user_addr_t		 buf;
		u_int32_t		 nr = 0;
		int			 space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		/*
		 * A zero-length buffer is a size probe: count the nodes,
		 * report the space required, and copy nothing out.
		 */
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;

			size = sizeof (struct pf_src_node) * nr;
			if (p64)
				psn64->psn_len = size;
			else
				psn32->psn_len = size;
			break;
		}

		/*
		 * Kernel-side staging copy; the user-visible fields are
		 * rewritten here without modifying the live node.
		 */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop before overflowing the user buffer */
			if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			/* export the rule number, not the kernel pointer */
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			/* convert absolute timestamps to relative seconds */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			/* scrub kernel pointers before copyout */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}

		/* report the number of bytes actually copied out */
		size = sizeof (struct pf_src_node) * nr;
		if (p64)
			psn64->psn_len = size;
		else
			psn32->psn_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);

}
4675
/*
 * Handler for the DIOCKILLSRCNODES ioctl: expire every source-tracking
 * node whose source and destination addresses match the (optionally
 * negated) address/mask pairs in psnk.  The number of nodes killed is
 * reported back to user space via psnk->psnk_af.  Returns 0 or errno.
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node	*sn;
		struct pf_state		*s;
		int			killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/*
					 * Detach every state that still
					 * references this node before it
					 * gets purged.
					 */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				/* expire = 1: remove on the next purge */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		/* psnk_af is reused to return the kill count */
		psnk->psnk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
4728
4729static int
4730pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4731    struct pfioc_iface_64 *io64, struct proc *p)
4732{
4733	int p64 = proc_is64bit(p);
4734	int error = 0;
4735
4736	switch (cmd) {
4737	case DIOCIGETIFACES: {
4738		user_addr_t buf;
4739		int esize;
4740
4741		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4742		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4743
4744		/* esize must be that of the user space version of pfi_kif */
4745		if (esize != sizeof (struct pfi_uif)) {
4746			error = ENODEV;
4747			break;
4748		}
4749		if (p64)
4750			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4751		else
4752			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4753		error = pfi_get_ifaces(
4754		    p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4755		    p64 ? &io64->pfiio_size : &io32->pfiio_size);
4756		break;
4757	}
4758
4759	case DIOCSETIFFLAG: {
4760		if (p64)
4761			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4762		else
4763			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4764
4765		error = pfi_set_flags(
4766		    p64 ? io64->pfiio_name : io32->pfiio_name,
4767		    p64 ? io64->pfiio_flags : io32->pfiio_flags);
4768		break;
4769	}
4770
4771	case DIOCCLRIFFLAG: {
4772		if (p64)
4773			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4774		else
4775			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4776
4777		error = pfi_clear_flags(
4778		    p64 ? io64->pfiio_name : io32->pfiio_name,
4779		    p64 ? io64->pfiio_flags : io32->pfiio_flags);
4780		break;
4781	}
4782
4783	default:
4784		VERIFY(0);
4785		/* NOTREACHED */
4786	}
4787
4788	return (error);
4789}
4790
/*
 * Main PF entry point, called per packet from the networking stack for
 * the given address family.  Takes the PF locks (unless already held),
 * detaches the packet from its mbuf chain, dispatches to the
 * AF-specific hook, and repairs the chain afterwards.  On return *mp
 * is NULL if the packet was dropped or consumed; *mppn (if non-NULL)
 * is updated so the caller's chain stays consistent.  Returns 0 or an
 * errno from the AF hook.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * NOTE(review): a 'none' mark appears to mean this thread already
	 * holds PF (the locks are only taken/released when the push
	 * succeeded) -- confirm against net_thread_marks_push().
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		/* fast exit while PF is disabled; pf_lock never taken */
		if (!pf_is_enabled)
			goto done;
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	/* detach this packet from the chain; process it alone */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		/* the hook may have produced a sub-chain; append at its
		 * tail, not at *mp directly */
		while (m->m_nextpkt != NULL)
			m = m->m_nextpkt;
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL)
			*mppn = *mp;
		else
			*mppn = nextpkt;
	}

	if (marks != net_thread_marks_none)
		lck_mtx_unlock(pf_lock);

done:
	if (marks != net_thread_marks_none)
		lck_rw_done(pf_perim_lock);

	net_thread_marks_pop(marks);
	return (error);
}
4854
4855
#if INET
/*
 * IPv4 PF hook: finalize any delayed transport checksum, byte-swap the
 * IP header length/offset fields around pf_test(), and translate the
 * verdict into an errno.  On a non-PASS verdict the packet is freed
 * and *mp set to NULL.
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* swap ip_len/ip_off to network byte order for pf_test() */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* non-PASS verdict with mbuf still present: drop */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* pf_test() already took ownership of the mbuf */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		if (*mp != NULL) {
			/* pf_test() may have replaced the mbuf; re-fetch
			 * the header before swapping back to host order */
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
#endif /* INET */
4910
#if INET6
/*
 * IPv6 PF hook: finalize any delayed transport checksum, run the
 * packet through pf_test6(), and translate the verdict into an errno.
 * On a non-PASS verdict the packet is freed and *mp set to NULL.
 */
int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* non-PASS verdict with mbuf still present: drop */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* pf_test6() already took ownership of the mbuf */
			error = ENOBUFS;
		}
	}
	return (error);
}
#endif /* INET6 */
4952
4953int
4954pf_ifaddr_hook(struct ifnet *ifp)
4955{
4956	struct pfi_kif *kif = ifp->if_pf_kif;
4957
4958	if (kif != NULL) {
4959		lck_rw_lock_shared(pf_perim_lock);
4960		lck_mtx_lock(pf_lock);
4961
4962		pfi_kifaddr_update(kif);
4963
4964		lck_mtx_unlock(pf_lock);
4965		lck_rw_done(pf_perim_lock);
4966	}
4967	return (0);
4968}
4969
4970/*
4971 * Caller acquires dlil lock as writer (exclusive)
4972 */
4973void
4974pf_ifnet_hook(struct ifnet *ifp, int attach)
4975{
4976	lck_rw_lock_shared(pf_perim_lock);
4977	lck_mtx_lock(pf_lock);
4978	if (attach)
4979		pfi_attach_ifnet(ifp);
4980	else
4981		pfi_detach_ifnet(ifp);
4982	lck_mtx_unlock(pf_lock);
4983	lck_rw_done(pf_perim_lock);
4984}
4985
4986static void
4987pf_attach_hooks(void)
4988{
4989	ifnet_head_lock_shared();
4990	/*
4991	 * Check against ifnet_addrs[] before proceeding, in case this
4992	 * is called very early on, e.g. during dlil_init() before any
4993	 * network interface is attached.
4994	 */
4995	if (ifnet_addrs != NULL) {
4996		int i;
4997
4998		for (i = 0; i <= if_index; i++) {
4999			struct ifnet *ifp = ifindex2ifnet[i];
5000			if (ifp != NULL) {
5001				pfi_attach_ifnet(ifp);
5002			}
5003		}
5004	}
5005	ifnet_head_done();
5006}
5007
#if 0
/*
 * Currently unused along with pfdetach().
 *
 * Detach PF from every interface it is attached to.  Fixed relative to
 * the original: the loop index was used in the for-statement before
 * its declaration (which sat inside the loop body), so this code would
 * not have compiled had it ever been re-enabled; the declaration now
 * precedes the loop.
 */
static void
pf_detach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];

			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
5027
5028/*
5029 * 'D' group ioctls.
5030 *
5031 * The switch statement below does nothing at runtime, as it serves as a
5032 * compile time check to ensure that all of the socket 'D' ioctls (those
5033 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique.  This works as long as this routine gets
5035 * updated each time a new interface ioctl gets added.
5036 *
5037 * Any failures at compile time indicates duplicated ioctl values.
5038 */
/*
 * Compile-time uniqueness check for the 'D' ioctl group: duplicate
 * case labels in a switch are a compile error, so any collision
 * between two DIOC* values fails the build.  Never called at runtime.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
5123