pf_ioctl.c revision 196019
1/*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
2
3/*
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 *    - Redistributions of source code must retain the above copyright
13 *      notice, this list of conditions and the following disclaimer.
14 *    - Redistributions in binary form must reproduce the above
15 *      copyright notice, this list of conditions and the following
16 *      disclaimer in the documentation and/or other materials provided
17 *      with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 *
36 */
37
38#ifdef __FreeBSD__
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 196019 2009-08-01 19:26:27Z rwatson $");
41
42#include "opt_inet.h"
43#include "opt_inet6.h"
44#include "opt_bpf.h"
45#include "opt_pf.h"
46
47#ifdef DEV_BPF
48#define	NBPFILTER	DEV_BPF
49#else
50#define	NBPFILTER	0
51#endif
52
53#ifdef DEV_PFLOG
54#define	NPFLOG		DEV_PFLOG
55#else
56#define	NPFLOG		0
57#endif
58
59#ifdef DEV_PFSYNC
60#define	NPFSYNC		DEV_PFSYNC
61#else
62#define	NPFSYNC		0
63#endif
64
65#else
66#include "bpfilter.h"
67#include "pflog.h"
68#include "pfsync.h"
69#endif
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/mbuf.h>
74#include <sys/filio.h>
75#include <sys/fcntl.h>
76#include <sys/socket.h>
77#include <sys/socketvar.h>
78#include <sys/kernel.h>
79#include <sys/time.h>
80#include <sys/malloc.h>
81#ifdef __FreeBSD__
82#include <sys/module.h>
83#include <sys/conf.h>
84#include <sys/proc.h>
85#include <sys/sysctl.h>
86#else
87#include <sys/timeout.h>
88#include <sys/pool.h>
89#endif
90#include <sys/proc.h>
91#include <sys/malloc.h>
92#include <sys/kthread.h>
93#ifndef __FreeBSD__
94#include <sys/rwlock.h>
95#include <uvm/uvm_extern.h>
96#endif
97
98#include <net/if.h>
99#include <net/if_types.h>
100#ifdef __FreeBSD__
101#include <net/vnet.h>
102#endif
103
104#include <netinet/in.h>
105#include <netinet/in_var.h>
106#include <netinet/in_systm.h>
107#include <netinet/ip.h>
108#include <netinet/ip_var.h>
109#include <netinet/ip_icmp.h>
110
111#ifdef __FreeBSD__
112#include <sys/md5.h>
113#else
114#include <dev/rndvar.h>
115#include <crypto/md5.h>
116#endif
117#include <net/pfvar.h>
118
119#if NPFSYNC > 0
120#include <net/if_pfsync.h>
121#endif /* NPFSYNC > 0 */
122
123#include <net/if_pflog.h>
124
125#ifdef INET6
126#include <netinet/ip6.h>
127#include <netinet/in_pcb.h>
128#endif /* INET6 */
129
130#ifdef ALTQ
131#include <altq/altq.h>
132#endif
133
134#ifdef __FreeBSD__
135#include <sys/limits.h>
136#include <sys/lock.h>
137#include <sys/mutex.h>
138#include <net/pfil.h>
139#endif /* __FreeBSD__ */
140
141#ifdef __FreeBSD__
142void			 init_zone_var(void);
143void			 cleanup_pf_zone(void);
144int			 pfattach(void);
145#else
146void			 pfattach(int);
147void			 pf_thread_create(void *);
148int			 pfopen(dev_t, int, int, struct proc *);
149int			 pfclose(dev_t, int, int, struct proc *);
150#endif
151struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
152			    u_int8_t, u_int8_t, u_int8_t);
153
154void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
155void			 pf_empty_pool(struct pf_palist *);
156#ifdef __FreeBSD__
157int			 pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
158#else
159int			 pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *);
160#endif
161#ifdef ALTQ
162int			 pf_begin_altq(u_int32_t *);
163int			 pf_rollback_altq(u_int32_t);
164int			 pf_commit_altq(u_int32_t);
165int			 pf_enable_altq(struct pf_altq *);
166int			 pf_disable_altq(struct pf_altq *);
167#endif /* ALTQ */
168int			 pf_begin_rules(u_int32_t *, int, const char *);
169int			 pf_rollback_rules(u_int32_t, int, char *);
170int			 pf_setup_pfsync_matching(struct pf_ruleset *);
171void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
172void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
173int			 pf_commit_rules(u_int32_t, int, char *);
174
175struct pf_rule		 pf_default_rule;
176#ifdef __FreeBSD__
177struct sx		 pf_consistency_lock;
178SX_SYSINIT(pf_consistency_lock, &pf_consistency_lock, "pf_statetbl_lock");
179#else
180struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER;
181#endif
182#ifdef ALTQ
183static int		 pf_altq_running;
184#endif
185
186#define	TAGID_MAX	 50000
187TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
188				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
189
190#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
191#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
192#endif
193u_int16_t		 tagname2tag(struct pf_tags *, char *);
194void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
195void			 tag_unref(struct pf_tags *, u_int16_t);
196int			 pf_rtlabel_add(struct pf_addr_wrap *);
197void			 pf_rtlabel_remove(struct pf_addr_wrap *);
198void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
199
200#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
201
202
203#ifdef __FreeBSD__
204static struct cdev	*pf_dev;
205
206/*
207 * XXX - These are new and need to be checked when moving to a new version
208 */
209static void		 pf_clear_states(void);
210static int		 pf_clear_tables(void);
211static void		 pf_clear_srcnodes(void);
212/*
213 * XXX - These are new and need to be checked when moving to a new version
214 */
215
216/*
217 * Wrapper functions for pfil(9) hooks
218 */
219static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
220		int dir, struct inpcb *inp);
221static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
222		int dir, struct inpcb *inp);
223#ifdef INET6
224static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
225		int dir, struct inpcb *inp);
226static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
227		int dir, struct inpcb *inp);
228#endif
229
230static int 		 hook_pf(void);
231static int 		 dehook_pf(void);
232static int 		 shutdown_pf(void);
233static int 		 pf_load(void);
234static int 		 pf_unload(void);
235
236static struct cdevsw pf_cdevsw = {
237	.d_ioctl =	pfioctl,
238	.d_name =	PF_NAME,
239	.d_version =	D_VERSION,
240};
241
242static volatile int pf_pfil_hooked = 0;
243int pf_end_threads = 0;
244struct mtx pf_task_mtx;
245pflog_packet_t *pflog_packet_ptr = NULL;
246
247int debug_pfugidhack = 0;
248SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
249    "Enable/disable pf user/group rules mpsafe hack");
250
251void
252init_pf_mutex(void)
253{
254	mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
255}
256
257void
258destroy_pf_mutex(void)
259{
260	mtx_destroy(&pf_task_mtx);
261}
262
263void
264init_zone_var(void)
265{
266	pf_src_tree_pl = pf_rule_pl = NULL;
267	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
268	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
269	pf_state_scrub_pl = NULL;
270	pfr_ktable_pl = pfr_kentry_pl = NULL;
271}
272
273void
274cleanup_pf_zone(void)
275{
276	UMA_DESTROY(pf_src_tree_pl);
277	UMA_DESTROY(pf_rule_pl);
278	UMA_DESTROY(pf_state_pl);
279	UMA_DESTROY(pf_altq_pl);
280	UMA_DESTROY(pf_pooladdr_pl);
281	UMA_DESTROY(pf_frent_pl);
282	UMA_DESTROY(pf_frag_pl);
283	UMA_DESTROY(pf_cache_pl);
284	UMA_DESTROY(pf_cent_pl);
285	UMA_DESTROY(pfr_ktable_pl);
286	UMA_DESTROY(pfr_kentry_pl2);
287	UMA_DESTROY(pfr_kentry_pl);
288	UMA_DESTROY(pf_state_scrub_pl);
289	UMA_DESTROY(pfi_addr_pl);
290}
291
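/*
 * FreeBSD attach routine: create the UMA zones backing the pf memory
 * pools, initialize the table, interface and OS-fingerprint code, set
 * the default pool limits, rule and timeout values, and start the
 * state purge thread.  Returns 0 on success or an errno value on
 * failure.
 */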
292int
293pfattach(void)
294{
295	u_int32_t *my_timeout = pf_default_rule.timeout;
296	int error = 1;
297
298	do {
299		UMA_CREATE(pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
300		UMA_CREATE(pf_rule_pl,	  struct pf_rule, "pfrulepl");
301		UMA_CREATE(pf_state_pl,	  struct pf_state, "pfstatepl");
302		UMA_CREATE(pf_altq_pl,	  struct pf_altq, "pfaltqpl");
303		UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
304		UMA_CREATE(pfr_ktable_pl,  struct pfr_ktable, "pfrktable");
305		UMA_CREATE(pfr_kentry_pl,  struct pfr_kentry, "pfrkentry");
306		UMA_CREATE(pfr_kentry_pl2,  struct pfr_kentry, "pfrkentry2");
307		UMA_CREATE(pf_frent_pl,	  struct pf_frent, "pffrent");
308		UMA_CREATE(pf_frag_pl,	  struct pf_fragment, "pffrag");
309		UMA_CREATE(pf_cache_pl,	  struct pf_fragment, "pffrcache");
310		UMA_CREATE(pf_cent_pl,	  struct pf_frcache, "pffrcent");
311		UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
312		    "pfstatescrub");
313		UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
314		error = 0;
315	} while (0);
316	if (error) {
317		cleanup_pf_zone();
318		return (error);
319	}
320	pfr_initialize();
321	pfi_initialize();
322	if ( (error = pf_osfp_initialize()) ) {
323		cleanup_pf_zone();
324		pf_osfp_cleanup();
325		return (error);
326	}
327
328	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
329	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
330	pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
331	pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
332	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
333	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
334	pf_pool_limits[PF_LIMIT_TABLES].pp = pfr_ktable_pl;
335	pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
336	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = pfr_kentry_pl;
337	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
338	uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
339		pf_pool_limits[PF_LIMIT_STATES].limit);
340
341	RB_INIT(&tree_src_tracking);
342	RB_INIT(&pf_anchors);
343	pf_init_ruleset(&pf_main_ruleset);
344	TAILQ_INIT(&pf_altqs[0]);
345	TAILQ_INIT(&pf_altqs[1]);
346	TAILQ_INIT(&pf_pabuf);
347	pf_altqs_active = &pf_altqs[0];
348	pf_altqs_inactive = &pf_altqs[1];
349	TAILQ_INIT(&state_list);
350
351	/* default rule should never be garbage collected */
352	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
353	pf_default_rule.action = PF_PASS;
354	pf_default_rule.nr = -1;
355	pf_default_rule.rtableid = -1;
356
357	/* initialize default timeouts */
358	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
359	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
360	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
361	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
362	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
363	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
364	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
365	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
366	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
367	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
368	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
369	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
370	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
371	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
372	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
373	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
374	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
375	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
376	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
377	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
378
379	pf_normalize_init();
380	bzero(&pf_status, sizeof(pf_status));
381	pf_status.debug = PF_DEBUG_URGENT;
382
383	pf_pfil_hooked = 0;
384
385	/* XXX do our best to avoid a conflict */
386	pf_status.hostid = arc4random();
387
388	if (kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pfpurge"))
389		return (ENXIO);
390
391	return (error);
392}
393#else /* !__FreeBSD__ */
394void
395pfattach(int num)
396{
397	u_int32_t *timeout = pf_default_rule.timeout;
398
399	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
400	    &pool_allocator_nointr);
401	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
402	    "pfsrctrpl", NULL);
403	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
404	    NULL);
405	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
406	    &pool_allocator_nointr);
407	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
408	    "pfpooladdrpl", &pool_allocator_nointr);
409	pfr_initialize();
410	pfi_initialize();
411	pf_osfp_initialize();
412
413	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
414	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
415
416	if (ctob(physmem) <= 100*1024*1024)
417		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
418		    PFR_KENTRY_HIWAT_SMALL;
419
420	RB_INIT(&tree_src_tracking);
421	RB_INIT(&pf_anchors);
422	pf_init_ruleset(&pf_main_ruleset);
423	TAILQ_INIT(&pf_altqs[0]);
424	TAILQ_INIT(&pf_altqs[1]);
425	TAILQ_INIT(&pf_pabuf);
426	pf_altqs_active = &pf_altqs[0];
427	pf_altqs_inactive = &pf_altqs[1];
428	TAILQ_INIT(&state_list);
429
430	/* default rule should never be garbage collected */
431	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
432	pf_default_rule.action = PF_PASS;
433	pf_default_rule.nr = -1;
434	pf_default_rule.rtableid = -1;
435
436	/* initialize default timeouts */
437	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
438	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
439	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
440	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
441	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
442	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
443	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
444	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
445	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
446	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
447	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
448	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
449	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
450	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
451	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
452	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
453	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
454	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
455	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
456	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
457
458	pf_normalize_init();
459	bzero(&pf_status, sizeof(pf_status));
460	pf_status.debug = PF_DEBUG_URGENT;
461
462	/* XXX do our best to avoid a conflict */
463	pf_status.hostid = arc4random();
464
465	/* require process context to purge states, so perform in a thread */
466	kproc_create_deferred(pf_thread_create, NULL);
467}
468
469void
470pf_thread_create(void *v)
471{
472	if (kproc_create(pf_purge_thread, NULL, NULL, "pfpurge"))
473		panic("pfpurge thread");
474}
475
476int
477pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
478{
479	if (dev2unit(dev) >= 1)
480		return (ENXIO);
481	return (0);
482}
483
484int
485pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
486{
487	if (dev2unit(dev) >= 1)
488		return (ENXIO);
489	return (0);
490}
491#endif /* __FreeBSD__ */
492
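/*
 * Look up the address pool of a single rule.  The rule is selected by
 * anchor name, ruleset (derived from rule_action), rule number (or the
 * last rule if r_last is set) and the active or inactive rule queue;
 * if check_ticket is set, the caller's ticket must match the ruleset's.
 * Returns NULL if no matching rule exists.
 */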
493struct pf_pool *
494pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
495    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
496    u_int8_t check_ticket)
497{
498	struct pf_ruleset	*ruleset;
499	struct pf_rule		*rule;
500	int			 rs_num;
501
502	ruleset = pf_find_ruleset(anchor);
503	if (ruleset == NULL)
504		return (NULL);
505	rs_num = pf_get_ruleset_number(rule_action);
506	if (rs_num >= PF_RULESET_MAX)
507		return (NULL);
508	if (active) {
509		if (check_ticket && ticket !=
510		    ruleset->rules[rs_num].active.ticket)
511			return (NULL);
512		if (r_last)
513			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
514			    pf_rulequeue);
515		else
516			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
517	} else {
518		if (check_ticket && ticket !=
519		    ruleset->rules[rs_num].inactive.ticket)
520			return (NULL);
521		if (r_last)
522			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
523			    pf_rulequeue);
524		else
525			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
526	}
527	if (!r_last) {
528		while ((rule != NULL) && (rule->nr != rule_number))
529			rule = TAILQ_NEXT(rule, entries);
530	}
531	if (rule == NULL)
532		return (NULL);
533
534	return (&rule->rpool);
535}
536
537void
538pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
539{
540	struct pf_pooladdr	*mv_pool_pa;
541
542	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
543		TAILQ_REMOVE(poola, mv_pool_pa, entries);
544		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
545	}
546}
547
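/*
 * Release the dynaddr, table and interface references held by every
 * pool address in poola and return the entries to the pooladdr pool.
 */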
548void
549pf_empty_pool(struct pf_palist *poola)
550{
551	struct pf_pooladdr	*empty_pool_pa;
552
553	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
554		pfi_dynaddr_remove(&empty_pool_pa->addr);
555		pf_tbladdr_remove(&empty_pool_pa->addr);
556		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
557		TAILQ_REMOVE(poola, empty_pool_pa, entries);
558		pool_put(&pf_pooladdr_pl, empty_pool_pa);
559	}
560}
561
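/*
 * Unlink a rule from its queue and free it.  A rule that is still
 * referenced by states or source nodes is only detached here; the
 * final teardown of its tags, queues, addresses and pool happens once
 * the last reference is gone.
 */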
562void
563pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
564{
565	if (rulequeue != NULL) {
566		if (rule->states <= 0) {
567			/*
568			 * XXX - we need to remove the table *before* detaching
569			 * the rule to make sure the table code does not delete
570			 * the anchor under our feet.
571			 */
572			pf_tbladdr_remove(&rule->src.addr);
573			pf_tbladdr_remove(&rule->dst.addr);
574			if (rule->overload_tbl)
575				pfr_detach_table(rule->overload_tbl);
576		}
577		TAILQ_REMOVE(rulequeue, rule, entries);
578		rule->entries.tqe_prev = NULL;
579		rule->nr = -1;
580	}
581
582	if (rule->states > 0 || rule->src_nodes > 0 ||
583	    rule->entries.tqe_prev != NULL)
584		return;
585	pf_tag_unref(rule->tag);
586	pf_tag_unref(rule->match_tag);
587#ifdef ALTQ
588	if (rule->pqid != rule->qid)
589		pf_qid_unref(rule->pqid);
590	pf_qid_unref(rule->qid);
591#endif
592	pf_rtlabel_remove(&rule->src.addr);
593	pf_rtlabel_remove(&rule->dst.addr);
594	pfi_dynaddr_remove(&rule->src.addr);
595	pfi_dynaddr_remove(&rule->dst.addr);
596	if (rulequeue == NULL) {
597		pf_tbladdr_remove(&rule->src.addr);
598		pf_tbladdr_remove(&rule->dst.addr);
599		if (rule->overload_tbl)
600			pfr_detach_table(rule->overload_tbl);
601	}
602	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
603	pf_anchor_remove(rule);
604	pf_empty_pool(&rule->rpool.list);
605	pool_put(&pf_rule_pl, rule);
606}
607
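/*
 * Map a tag name to a numeric tag id.  An existing entry just gets its
 * reference count bumped; otherwise the list is scanned for the first
 * free id (to keep the id space compact) and a new entry is inserted.
 * Returns 0 if no id below TAGID_MAX is available or allocation fails.
 */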
608u_int16_t
609tagname2tag(struct pf_tags *head, char *tagname)
610{
611	struct pf_tagname	*tag, *p = NULL;
612	u_int16_t		 new_tagid = 1;
613
614	TAILQ_FOREACH(tag, head, entries)
615		if (strcmp(tagname, tag->name) == 0) {
616			tag->ref++;
617			return (tag->tag);
618		}
619
620	/*
621	 * to avoid fragmentation, we do a linear search from the beginning
622	 * and take the first free slot we find. if there is none or the list
623	 * is empty, append a new entry at the end.
624	 */
625
626	/* new entry */
627	if (!TAILQ_EMPTY(head))
628		for (p = TAILQ_FIRST(head); p != NULL &&
629		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
630			new_tagid = p->tag + 1;
631
632	if (new_tagid > TAGID_MAX)
633		return (0);
634
635	/* allocate and fill new struct pf_tagname */
636	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
637	    M_TEMP, M_NOWAIT);
638	if (tag == NULL)
639		return (0);
640	bzero(tag, sizeof(struct pf_tagname));
641	strlcpy(tag->name, tagname, sizeof(tag->name));
642	tag->tag = new_tagid;
643	tag->ref++;
644
645	if (p != NULL)	/* insert new entry before p */
646		TAILQ_INSERT_BEFORE(p, tag, entries);
647	else	/* either list empty or no free slot in between */
648		TAILQ_INSERT_TAIL(head, tag, entries);
649
650	return (tag->tag);
651}
652
653void
654tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
655{
656	struct pf_tagname	*tag;
657
658	TAILQ_FOREACH(tag, head, entries)
659		if (tag->tag == tagid) {
660			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
661			return;
662		}
663}
664
665void
666tag_unref(struct pf_tags *head, u_int16_t tag)
667{
668	struct pf_tagname	*p, *next;
669
670	if (tag == 0)
671		return;
672
673	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
674		next = TAILQ_NEXT(p, entries);
675		if (tag == p->tag) {
676			if (--p->ref == 0) {
677				TAILQ_REMOVE(head, p, entries);
678				free(p, M_TEMP);
679			}
680			break;
681		}
682	}
683}
684
685u_int16_t
686pf_tagname2tag(char *tagname)
687{
688	return (tagname2tag(&pf_tags, tagname));
689}
690
691void
692pf_tag2tagname(u_int16_t tagid, char *p)
693{
694	tag2tagname(&pf_tags, tagid, p);
695}
696
697void
698pf_tag_ref(u_int16_t tag)
699{
700	struct pf_tagname *t;
701
702	TAILQ_FOREACH(t, &pf_tags, entries)
703		if (t->tag == tag)
704			break;
705	if (t != NULL)
706		t->ref++;
707}
708
709void
710pf_tag_unref(u_int16_t tag)
711{
712	tag_unref(&pf_tags, tag);
713}
714
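/*
 * Route-label helpers.  Route labels are not implemented in the
 * FreeBSD port, so these are stubs (see the XXX_IMPORT markers); on
 * OpenBSD they translate between label names and ids and manage the
 * label references.
 */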
715int
716pf_rtlabel_add(struct pf_addr_wrap *a)
717{
718#ifdef __FreeBSD__
719	/* XXX_IMPORT: later */
720	return (0);
721#else
722	if (a->type == PF_ADDR_RTLABEL &&
723	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
724		return (-1);
725	return (0);
726#endif
727}
728
729void
730pf_rtlabel_remove(struct pf_addr_wrap *a)
731{
732#ifdef __FreeBSD__
733	/* XXX_IMPORT: later */
734#else
735	if (a->type == PF_ADDR_RTLABEL)
736		rtlabel_unref(a->v.rtlabel);
737#endif
738}
739
740void
741pf_rtlabel_copyout(struct pf_addr_wrap *a)
742{
743#ifdef __FreeBSD__
744	/* XXX_IMPORT: later */
745	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
746		strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
747#else
748	const char	*name;
749
750	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
751		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
752			strlcpy(a->v.rtlabelname, "?",
753			    sizeof(a->v.rtlabelname));
754		else
755			strlcpy(a->v.rtlabelname, name,
756			    sizeof(a->v.rtlabelname));
757	}
758#endif
759}
760
761#ifdef ALTQ
762u_int32_t
763pf_qname2qid(char *qname)
764{
765	return ((u_int32_t)tagname2tag(&pf_qids, qname));
766}
767
768void
769pf_qid2qname(u_int32_t qid, char *p)
770{
771	tag2tagname(&pf_qids, (u_int16_t)qid, p);
772}
773
774void
775pf_qid_unref(u_int32_t qid)
776{
777	tag_unref(&pf_qids, (u_int16_t)qid);
778}
779
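/*
 * Ticket-based transactions for the ALTQ list: pf_begin_altq() purges
 * the inactive queue list and hands out a new ticket,
 * pf_rollback_altq() throws the inactive list away again, and
 * pf_commit_altq() swaps the active and inactive lists, attaches the
 * new disciplines and tears down the old ones.
 */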
780int
781pf_begin_altq(u_int32_t *ticket)
782{
783	struct pf_altq	*altq;
784	int		 error = 0;
785
786	/* Purge the old altq list */
787	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
788		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
789#ifdef __FreeBSD__
790		if (altq->qname[0] == 0 &&
791		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
792#else
793		if (altq->qname[0] == 0) {
794#endif
795			/* detach and destroy the discipline */
796			error = altq_remove(altq);
797		} else
798			pf_qid_unref(altq->qid);
799		pool_put(&pf_altq_pl, altq);
800	}
801	if (error)
802		return (error);
803	*ticket = ++ticket_altqs_inactive;
804	altqs_inactive_open = 1;
805	return (0);
806}
807
808int
809pf_rollback_altq(u_int32_t ticket)
810{
811	struct pf_altq	*altq;
812	int		 error = 0;
813
814	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
815		return (0);
816	/* Purge the old altq list */
817	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
818		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
819#ifdef __FreeBSD__
820		if (altq->qname[0] == 0 &&
821		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
822#else
823		if (altq->qname[0] == 0) {
824#endif
825			/* detach and destroy the discipline */
826			error = altq_remove(altq);
827		} else
828			pf_qid_unref(altq->qid);
829		pool_put(&pf_altq_pl, altq);
830	}
831	altqs_inactive_open = 0;
832	return (error);
833}
834
835int
836pf_commit_altq(u_int32_t ticket)
837{
838	struct pf_altqqueue	*old_altqs;
839	struct pf_altq		*altq;
840	int			 s, err, error = 0;
841
842	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
843		return (EBUSY);
844
845	/* swap altqs, keep the old. */
846	s = splsoftnet();
847	old_altqs = pf_altqs_active;
848	pf_altqs_active = pf_altqs_inactive;
849	pf_altqs_inactive = old_altqs;
850	ticket_altqs_active = ticket_altqs_inactive;
851
852	/* Attach new disciplines */
853	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
854#ifdef __FreeBSD__
855		if (altq->qname[0] == 0 &&
856		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
857#else
858		if (altq->qname[0] == 0) {
859#endif
860			/* attach the discipline */
861			error = altq_pfattach(altq);
862			if (error == 0 && pf_altq_running)
863				error = pf_enable_altq(altq);
864			if (error != 0) {
865				splx(s);
866				return (error);
867			}
868		}
869	}
870
871	/* Purge the old altq list */
872	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
873		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
874#ifdef __FreeBSD__
875		if (altq->qname[0] == 0 &&
876		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
877#else
878		if (altq->qname[0] == 0) {
879#endif
880			/* detach and destroy the discipline */
881			if (pf_altq_running)
882				error = pf_disable_altq(altq);
883			err = altq_pfdetach(altq);
884			if (err != 0 && error == 0)
885				error = err;
886			err = altq_remove(altq);
887			if (err != 0 && error == 0)
888				error = err;
889		} else
890			pf_qid_unref(altq->qid);
891		pool_put(&pf_altq_pl, altq);
892	}
893	splx(s);
894
895	altqs_inactive_open = 0;
896	return (error);
897}
898
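/*
 * Enable queueing on the interface named in the altq and program the
 * token-bucket regulator from its interface bandwidth and tbrsize.
 */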
899int
900pf_enable_altq(struct pf_altq *altq)
901{
902	struct ifnet		*ifp;
903	struct tb_profile	 tb;
904	int			 s, error = 0;
905
906	if ((ifp = ifunit(altq->ifname)) == NULL)
907		return (EINVAL);
908
909	if (ifp->if_snd.altq_type != ALTQT_NONE)
910		error = altq_enable(&ifp->if_snd);
911
912	/* set tokenbucket regulator */
913	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
914		tb.rate = altq->ifbandwidth;
915		tb.depth = altq->tbrsize;
916		s = splnet();
917#ifdef __FreeBSD__
918		PF_UNLOCK();
919#endif
920		error = tbr_set(&ifp->if_snd, &tb);
921#ifdef __FreeBSD__
922		PF_LOCK();
923#endif
924		splx(s);
925	}
926
927	return (error);
928}
929
930int
931pf_disable_altq(struct pf_altq *altq)
932{
933	struct ifnet		*ifp;
934	struct tb_profile	 tb;
935	int			 s, error;
936
937	if ((ifp = ifunit(altq->ifname)) == NULL)
938		return (EINVAL);
939
940	/*
941	 * If the discipline is no longer attached to the interface, it has
942	 * been overridden by a new one; in that case just return.
943	 */
944	if (altq->altq_disc != ifp->if_snd.altq_disc)
945		return (0);
946
947	error = altq_disable(&ifp->if_snd);
948
949	if (error == 0) {
950		/* clear tokenbucket regulator */
951		tb.rate = 0;
952		s = splnet();
953#ifdef __FreeBSD__
954		PF_UNLOCK();
955#endif
956		error = tbr_set(&ifp->if_snd, &tb);
957#ifdef __FreeBSD__
958		PF_LOCK();
959#endif
960		splx(s);
961	}
962
963	return (error);
964}
965
966#ifdef __FreeBSD__
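/*
 * Rebuild the ALTQ list when an interface arrives or departs.  The
 * current active set is copied into a fresh inactive set; queues whose
 * interface is gone (or is the departing one) are marked with
 * PFALTQ_FLAG_IF_REMOVED instead of being attached, and the result is
 * committed, or rolled back on error.
 */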
967void
968pf_altq_ifnet_event(struct ifnet *ifp, int remove)
969{
970	struct ifnet		*ifp1;
971	struct pf_altq		*a1, *a2, *a3;
972	u_int32_t		 ticket;
973	int			 error = 0;
974
975	/* Interrupt userland queue modifications */
976	if (altqs_inactive_open)
977		pf_rollback_altq(ticket_altqs_inactive);
978
979	/* Start new altq ruleset */
980	if (pf_begin_altq(&ticket))
981		return;
982
983	/* Copy the current active set */
984	TAILQ_FOREACH(a1, pf_altqs_active, entries) {
985		a2 = pool_get(&pf_altq_pl, PR_NOWAIT);
986		if (a2 == NULL) {
987			error = ENOMEM;
988			break;
989		}
990		bcopy(a1, a2, sizeof(struct pf_altq));
991
992		if (a2->qname[0] != 0) {
993			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
994				error = EBUSY;
995				pool_put(&pf_altq_pl, a2);
996				break;
997			}
998			a2->altq_disc = NULL;
999			TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
1000				if (strncmp(a3->ifname, a2->ifname,
1001				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
1002					a2->altq_disc = a3->altq_disc;
1003					break;
1004				}
1005			}
1006		}
1007		/* Deactivate the interface in question */
1008		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1009		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
1010		    (remove && ifp1 == ifp)) {
1011			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1012		} else {
1013			PF_UNLOCK();
1014			error = altq_add(a2);
1015			PF_LOCK();
1016
1017			if (ticket != ticket_altqs_inactive)
1018				error = EBUSY;
1019
1020			if (error) {
1021				pool_put(&pf_altq_pl, a2);
1022				break;
1023			}
1024		}
1025
1026		TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
1027	}
1028
1029	if (error != 0)
1030		pf_rollback_altq(ticket);
1031	else
1032		pf_commit_altq(ticket);
1033}
1034#endif
1035#endif /* ALTQ */
1036
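/*
 * Ruleset transactions mirror the ALTQ ones: pf_begin_rules() flushes
 * the inactive rule queue and returns a ticket, and pf_rollback_rules()
 * discards the inactive queue if the ticket still matches.  The
 * matching commit step is pf_commit_rules() below.
 */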
1037int
1038pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1039{
1040	struct pf_ruleset	*rs;
1041	struct pf_rule		*rule;
1042
1043	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1044		return (EINVAL);
1045	rs = pf_find_or_create_ruleset(anchor);
1046	if (rs == NULL)
1047		return (EINVAL);
1048	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1049		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1050		rs->rules[rs_num].inactive.rcount--;
1051	}
1052	*ticket = ++rs->rules[rs_num].inactive.ticket;
1053	rs->rules[rs_num].inactive.open = 1;
1054	return (0);
1055}
1056
1057int
1058pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1059{
1060	struct pf_ruleset	*rs;
1061	struct pf_rule		*rule;
1062
1063	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1064		return (EINVAL);
1065	rs = pf_find_ruleset(anchor);
1066	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1067	    rs->rules[rs_num].inactive.ticket != ticket)
1068		return (0);
1069	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1070		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1071		rs->rules[rs_num].inactive.rcount--;
1072	}
1073	rs->rules[rs_num].inactive.open = 0;
1074	return (0);
1075}
1076
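/*
 * Helper macros that feed individual rule fields (numeric fields in
 * network byte order) into an MD5 context; used below to compute the
 * ruleset checksum for pfsync matching (see pf_setup_pfsync_matching()).
 */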
1077#define PF_MD5_UPD(st, elm)						\
1078		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1079
1080#define PF_MD5_UPD_STR(st, elm)						\
1081		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1082
1083#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1084		(stor) = htonl((st)->elm);				\
1085		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1086} while (0)
1087
1088#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1089		(stor) = htons((st)->elm);				\
1090		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1091} while (0)
1092
1093void
1094pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1095{
1096	PF_MD5_UPD(pfr, addr.type);
1097	switch (pfr->addr.type) {
1098		case PF_ADDR_DYNIFTL:
1099			PF_MD5_UPD(pfr, addr.v.ifname);
1100			PF_MD5_UPD(pfr, addr.iflags);
1101			break;
1102		case PF_ADDR_TABLE:
1103			PF_MD5_UPD(pfr, addr.v.tblname);
1104			break;
1105		case PF_ADDR_ADDRMASK:
1106			/* XXX ignore af? */
1107			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1108			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1109			break;
1110		case PF_ADDR_RTLABEL:
1111			PF_MD5_UPD(pfr, addr.v.rtlabelname);
1112			break;
1113	}
1114
1115	PF_MD5_UPD(pfr, port[0]);
1116	PF_MD5_UPD(pfr, port[1]);
1117	PF_MD5_UPD(pfr, neg);
1118	PF_MD5_UPD(pfr, port_op);
1119}
1120
1121void
1122pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
1123{
1124	u_int16_t x;
1125	u_int32_t y;
1126
1127	pf_hash_rule_addr(ctx, &rule->src);
1128	pf_hash_rule_addr(ctx, &rule->dst);
1129	PF_MD5_UPD_STR(rule, label);
1130	PF_MD5_UPD_STR(rule, ifname);
1131	PF_MD5_UPD_STR(rule, match_tagname);
1132	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1133	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1134	PF_MD5_UPD_HTONL(rule, prob, y);
1135	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1136	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1137	PF_MD5_UPD(rule, uid.op);
1138	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1139	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1140	PF_MD5_UPD(rule, gid.op);
1141	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1142	PF_MD5_UPD(rule, action);
1143	PF_MD5_UPD(rule, direction);
1144	PF_MD5_UPD(rule, af);
1145	PF_MD5_UPD(rule, quick);
1146	PF_MD5_UPD(rule, ifnot);
1147	PF_MD5_UPD(rule, match_tag_not);
1148	PF_MD5_UPD(rule, natpass);
1149	PF_MD5_UPD(rule, keep_state);
1150	PF_MD5_UPD(rule, proto);
1151	PF_MD5_UPD(rule, type);
1152	PF_MD5_UPD(rule, code);
1153	PF_MD5_UPD(rule, flags);
1154	PF_MD5_UPD(rule, flagset);
1155	PF_MD5_UPD(rule, allow_opts);
1156	PF_MD5_UPD(rule, rt);
1157	PF_MD5_UPD(rule, tos);
1158}
1159
1160int
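/*
 * Commit a ruleset transaction: verify the ticket, recompute the
 * ruleset checksum for the main ruleset, swap the active and inactive
 * rule queues at splsoftnet(), recalculate the skip steps and purge
 * the rules that were just replaced.
 */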
1161pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1162{
1163	struct pf_ruleset	*rs;
1164	struct pf_rule		*rule, **old_array;
1165	struct pf_rulequeue	*old_rules;
1166	int			 s, error;
1167	u_int32_t		 old_rcount;
1168
1169	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1170		return (EINVAL);
1171	rs = pf_find_ruleset(anchor);
1172	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1173	    ticket != rs->rules[rs_num].inactive.ticket)
1174		return (EBUSY);
1175
1176	/* Calculate checksum for the main ruleset */
1177	if (rs == &pf_main_ruleset) {
1178		error = pf_setup_pfsync_matching(rs);
1179		if (error != 0)
1180			return (error);
1181	}
1182
1183	/* Swap rules, keep the old. */
1184	s = splsoftnet();
1185	old_rules = rs->rules[rs_num].active.ptr;
1186	old_rcount = rs->rules[rs_num].active.rcount;
1187	old_array = rs->rules[rs_num].active.ptr_array;
1188
1189	rs->rules[rs_num].active.ptr =
1190	    rs->rules[rs_num].inactive.ptr;
1191	rs->rules[rs_num].active.ptr_array =
1192	    rs->rules[rs_num].inactive.ptr_array;
1193	rs->rules[rs_num].active.rcount =
1194	    rs->rules[rs_num].inactive.rcount;
1195	rs->rules[rs_num].inactive.ptr = old_rules;
1196	rs->rules[rs_num].inactive.ptr_array = old_array;
1197	rs->rules[rs_num].inactive.rcount = old_rcount;
1198
1199	rs->rules[rs_num].active.ticket =
1200	    rs->rules[rs_num].inactive.ticket;
1201	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1202
1203
1204	/* Purge the old rule list. */
1205	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1206		pf_rm_rule(old_rules, rule);
1207	if (rs->rules[rs_num].inactive.ptr_array)
1208		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1209	rs->rules[rs_num].inactive.ptr_array = NULL;
1210	rs->rules[rs_num].inactive.rcount = 0;
1211	rs->rules[rs_num].inactive.open = 0;
1212	pf_remove_if_empty_ruleset(rs);
1213	splx(s);
1214	return (0);
1215}
1216
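/*
 * Build the per-ruleset rule pointer arrays and hash every rule
 * (except the scrub rulesets) into an MD5 digest, which is stored as
 * pf_status.pf_chksum so that pfsync peers can tell whether they are
 * running the same ruleset.
 */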
1217int
1218pf_setup_pfsync_matching(struct pf_ruleset *rs)
1219{
1220	MD5_CTX			 ctx;
1221	struct pf_rule		*rule;
1222	int			 rs_cnt;
1223	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1224
1225	MD5Init(&ctx);
1226	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1227		/* XXX PF_RULESET_SCRUB as well? */
1228		if (rs_cnt == PF_RULESET_SCRUB)
1229			continue;
1230
1231		if (rs->rules[rs_cnt].inactive.ptr_array)
1232			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1233		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1234
1235		if (rs->rules[rs_cnt].inactive.rcount) {
1236			rs->rules[rs_cnt].inactive.ptr_array =
1237			    malloc(sizeof(caddr_t) *
1238			    rs->rules[rs_cnt].inactive.rcount,
1239			    M_TEMP, M_NOWAIT);
1240
1241			if (!rs->rules[rs_cnt].inactive.ptr_array)
1242				return (ENOMEM);
1243		}
1244
1245		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1246		    entries) {
1247			pf_hash_rule(&ctx, rule);
1248			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1249		}
1250	}
1251
1252	MD5Final(digest, &ctx);
1253	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
1254	return (0);
1255}
1256
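/*
 * pfioctl() is the single configuration entry point for pf(4): it
 * filters commands by securelevel and by whether the descriptor was
 * opened for writing, takes the consistency lock (shared for read-only
 * commands, exclusive otherwise) and the pf lock (splsoftnet() on
 * OpenBSD), and then dispatches on the ioctl command.  Illustrative
 * userland usage (a sketch, not part of this file), assuming the
 * standard /dev/pf device node:
 *
 *	int fd = open("/dev/pf", O_RDWR);
 *	if (fd == -1)
 *		err(1, "/dev/pf");
 *	if (ioctl(fd, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");
 */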
1257int
1258#ifdef __FreeBSD__
1259pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1260#else
1261pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1262#endif
1263{
1264	struct pf_pooladdr	*pa = NULL;
1265	struct pf_pool		*pool = NULL;
1266#ifndef __FreeBSD__
1267	int			 s;
1268#endif
1269	int			 error = 0;
1270
1271	/* XXX keep in sync with switch() below */
1272#ifdef __FreeBSD__
1273	if (securelevel_gt(td->td_ucred, 2))
1274#else
1275	if (securelevel > 1)
1276#endif
1277		switch (cmd) {
1278		case DIOCGETRULES:
1279		case DIOCGETRULE:
1280		case DIOCGETADDRS:
1281		case DIOCGETADDR:
1282		case DIOCGETSTATE:
1283		case DIOCSETSTATUSIF:
1284		case DIOCGETSTATUS:
1285		case DIOCCLRSTATUS:
1286		case DIOCNATLOOK:
1287		case DIOCSETDEBUG:
1288		case DIOCGETSTATES:
1289		case DIOCGETTIMEOUT:
1290		case DIOCCLRRULECTRS:
1291		case DIOCGETLIMIT:
1292		case DIOCGETALTQS:
1293		case DIOCGETALTQ:
1294		case DIOCGETQSTATS:
1295		case DIOCGETRULESETS:
1296		case DIOCGETRULESET:
1297		case DIOCRGETTABLES:
1298		case DIOCRGETTSTATS:
1299		case DIOCRCLRTSTATS:
1300		case DIOCRCLRADDRS:
1301		case DIOCRADDADDRS:
1302		case DIOCRDELADDRS:
1303		case DIOCRSETADDRS:
1304		case DIOCRGETADDRS:
1305		case DIOCRGETASTATS:
1306		case DIOCRCLRASTATS:
1307		case DIOCRTSTADDRS:
1308		case DIOCOSFPGET:
1309		case DIOCGETSRCNODES:
1310		case DIOCCLRSRCNODES:
1311		case DIOCIGETIFACES:
1312#ifdef __FreeBSD__
1313		case DIOCGIFSPEED:
1314#endif
1315		case DIOCSETIFFLAG:
1316		case DIOCCLRIFFLAG:
1317			break;
1318		case DIOCRCLRTABLES:
1319		case DIOCRADDTABLES:
1320		case DIOCRDELTABLES:
1321		case DIOCRSETTFLAGS:
1322			if (((struct pfioc_table *)addr)->pfrio_flags &
1323			    PFR_FLAG_DUMMY)
1324				break; /* dummy operation ok */
1325			return (EPERM);
1326		default:
1327			return (EPERM);
1328		}
1329
1330	if (!(flags & FWRITE))
1331		switch (cmd) {
1332		case DIOCGETRULES:
1333		case DIOCGETADDRS:
1334		case DIOCGETADDR:
1335		case DIOCGETSTATE:
1336		case DIOCGETSTATUS:
1337		case DIOCGETSTATES:
1338		case DIOCGETTIMEOUT:
1339		case DIOCGETLIMIT:
1340		case DIOCGETALTQS:
1341		case DIOCGETALTQ:
1342		case DIOCGETQSTATS:
1343		case DIOCGETRULESETS:
1344		case DIOCGETRULESET:
1345		case DIOCNATLOOK:
1346		case DIOCRGETTABLES:
1347		case DIOCRGETTSTATS:
1348		case DIOCRGETADDRS:
1349		case DIOCRGETASTATS:
1350		case DIOCRTSTADDRS:
1351		case DIOCOSFPGET:
1352		case DIOCGETSRCNODES:
1353		case DIOCIGETIFACES:
1354#ifdef __FreeBSD__
1355		case DIOCGIFSPEED:
1356#endif
1357			break;
1358		case DIOCRCLRTABLES:
1359		case DIOCRADDTABLES:
1360		case DIOCRDELTABLES:
1361		case DIOCRCLRTSTATS:
1362		case DIOCRCLRADDRS:
1363		case DIOCRADDADDRS:
1364		case DIOCRDELADDRS:
1365		case DIOCRSETADDRS:
1366		case DIOCRSETTFLAGS:
1367			if (((struct pfioc_table *)addr)->pfrio_flags &
1368			    PFR_FLAG_DUMMY) {
1369				flags |= FWRITE; /* need write lock for dummy */
1370				break; /* dummy operation ok */
1371			}
1372			return (EACCES);
1373		case DIOCGETRULE:
1374			if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
1375				return (EACCES);
1376			break;
1377		default:
1378			return (EACCES);
1379		}
1380
1381	if (flags & FWRITE)
1382#ifdef __FreeBSD__
1383		sx_xlock(&pf_consistency_lock);
1384	else
1385		sx_slock(&pf_consistency_lock);
1386#else
1387		rw_enter_write(&pf_consistency_lock);
1388	else
1389		rw_enter_read(&pf_consistency_lock);
1390#endif
1391
1392#ifdef __FreeBSD__
1393	PF_LOCK();
1394#else
1395	s = splsoftnet();
1396#endif
1397	switch (cmd) {
1398
1399	case DIOCSTART:
1400		if (pf_status.running)
1401			error = EEXIST;
1402		else {
1403#ifdef __FreeBSD__
1404			PF_UNLOCK();
1405			error = hook_pf();
1406			PF_LOCK();
1407			if (error) {
1408				DPFPRINTF(PF_DEBUG_MISC,
1409				    ("pf: pfil registration failed\n"));
1410				break;
1411			}
1412#endif
1413			pf_status.running = 1;
1414			pf_status.since = time_second;
1415			if (pf_status.stateid == 0) {
1416				pf_status.stateid = time_second;
1417				pf_status.stateid = pf_status.stateid << 32;
1418			}
1419			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1420		}
1421		break;
1422
1423	case DIOCSTOP:
1424		if (!pf_status.running)
1425			error = ENOENT;
1426		else {
1427			pf_status.running = 0;
1428#ifdef __FreeBSD__
1429			PF_UNLOCK();
1430			error = dehook_pf();
1431			PF_LOCK();
1432			if (error) {
1433				pf_status.running = 1;
1434				DPFPRINTF(PF_DEBUG_MISC,
1435					("pf: pfil unregistration failed\n"));
1436			}
1437#endif
1438			pf_status.since = time_second;
1439			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1440		}
1441		break;
1442
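	/*
	 * DIOCADDRULE: append a rule to the inactive ruleset.  The
	 * ticket must match the one issued when the inactive ruleset
	 * was opened (pf_begin_rules()) and the pool ticket must match
	 * the current address-pool buffer; the rule's interface, queue,
	 * tag, table and anchor references are then resolved before it
	 * is queued.
	 */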
1443	case DIOCADDRULE: {
1444		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1445		struct pf_ruleset	*ruleset;
1446		struct pf_rule		*rule, *tail;
1447		struct pf_pooladdr	*pa;
1448		int			 rs_num;
1449
1450		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1451		ruleset = pf_find_ruleset(pr->anchor);
1452		if (ruleset == NULL) {
1453			error = EINVAL;
1454			break;
1455		}
1456		rs_num = pf_get_ruleset_number(pr->rule.action);
1457		if (rs_num >= PF_RULESET_MAX) {
1458			error = EINVAL;
1459			break;
1460		}
1461		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1462			error = EINVAL;
1463			break;
1464		}
1465		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1466#ifdef __FreeBSD__
1467			DPFPRINTF(PF_DEBUG_MISC,
1468			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1469			    ruleset->rules[rs_num].inactive.ticket));
1470#endif
1471			error = EBUSY;
1472			break;
1473		}
1474		if (pr->pool_ticket != ticket_pabuf) {
1475#ifdef __FreeBSD__
1476			DPFPRINTF(PF_DEBUG_MISC,
1477			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
1478			    ticket_pabuf));
1479#endif
1480			error = EBUSY;
1481			break;
1482		}
1483		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1484		if (rule == NULL) {
1485			error = ENOMEM;
1486			break;
1487		}
1488		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1489#ifdef __FreeBSD__
1490		rule->cuid = td->td_ucred->cr_ruid;
1491		rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1492#else
1493		rule->cuid = p->p_cred->p_ruid;
1494		rule->cpid = p->p_pid;
1495#endif
1496		rule->anchor = NULL;
1497		rule->kif = NULL;
1498		TAILQ_INIT(&rule->rpool.list);
1499		/* initialize refcounting */
1500		rule->states = 0;
1501		rule->src_nodes = 0;
1502		rule->entries.tqe_prev = NULL;
1503#ifndef INET
1504		if (rule->af == AF_INET) {
1505			pool_put(&pf_rule_pl, rule);
1506			error = EAFNOSUPPORT;
1507			break;
1508		}
1509#endif /* INET */
1510#ifndef INET6
1511		if (rule->af == AF_INET6) {
1512			pool_put(&pf_rule_pl, rule);
1513			error = EAFNOSUPPORT;
1514			break;
1515		}
1516#endif /* INET6 */
1517		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1518		    pf_rulequeue);
1519		if (tail)
1520			rule->nr = tail->nr + 1;
1521		else
1522			rule->nr = 0;
1523		if (rule->ifname[0]) {
1524			rule->kif = pfi_kif_get(rule->ifname);
1525			if (rule->kif == NULL) {
1526				pool_put(&pf_rule_pl, rule);
1527				error = EINVAL;
1528				break;
1529			}
1530			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1531		}
1532
1533#ifdef __FreeBSD__ /* ROUTING */
1534		if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
1535#else
1536		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
1537#endif
1538			error = EBUSY;
1539
1540#ifdef ALTQ
1541		/* set queue IDs */
1542		if (rule->qname[0] != 0) {
1543			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1544				error = EBUSY;
1545			else if (rule->pqname[0] != 0) {
1546				if ((rule->pqid =
1547				    pf_qname2qid(rule->pqname)) == 0)
1548					error = EBUSY;
1549			} else
1550				rule->pqid = rule->qid;
1551		}
1552#endif
1553		if (rule->tagname[0])
1554			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1555				error = EBUSY;
1556		if (rule->match_tagname[0])
1557			if ((rule->match_tag =
1558			    pf_tagname2tag(rule->match_tagname)) == 0)
1559				error = EBUSY;
1560		if (rule->rt && !rule->direction)
1561			error = EINVAL;
1562#if NPFLOG > 0
1563#ifdef __FreeBSD__
1564		if (!rule->log)
1565			rule->logif = 0;
1566#endif
1567		if (rule->logif >= PFLOGIFS_MAX)
1568			error = EINVAL;
1569#endif
1570		if (pf_rtlabel_add(&rule->src.addr) ||
1571		    pf_rtlabel_add(&rule->dst.addr))
1572			error = EBUSY;
1573		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1574			error = EINVAL;
1575		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1576			error = EINVAL;
1577		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1578			error = EINVAL;
1579		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1580			error = EINVAL;
1581		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1582			error = EINVAL;
1583		TAILQ_FOREACH(pa, &pf_pabuf, entries)
1584			if (pf_tbladdr_setup(ruleset, &pa->addr))
1585				error = EINVAL;
1586
1587		if (rule->overload_tblname[0]) {
1588			if ((rule->overload_tbl = pfr_attach_table(ruleset,
1589			    rule->overload_tblname)) == NULL)
1590				error = EINVAL;
1591			else
1592				rule->overload_tbl->pfrkt_flags |=
1593				    PFR_TFLAG_ACTIVE;
1594		}
1595
1596		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1597		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1598		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1599		    (rule->rt > PF_FASTROUTE)) &&
1600		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
1601			error = EINVAL;
1602
1603		if (error) {
1604			pf_rm_rule(NULL, rule);
1605			break;
1606		}
1607
1608#ifdef __FreeBSD__
1609		if (!debug_pfugidhack && (rule->uid.op || rule->gid.op ||
1610		    rule->log & PF_LOG_SOCKET_LOOKUP)) {
1611			DPFPRINTF(PF_DEBUG_MISC,
1612			    ("pf: debug.pfugidhack enabled\n"));
1613			debug_pfugidhack = 1;
1614		}
1615#endif
1616
1617		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1618		rule->evaluations = rule->packets[0] = rule->packets[1] =
1619		    rule->bytes[0] = rule->bytes[1] = 0;
1620		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1621		    rule, entries);
1622		ruleset->rules[rs_num].inactive.rcount++;
1623		break;
1624	}
1625
1626	case DIOCGETRULES: {
1627		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1628		struct pf_ruleset	*ruleset;
1629		struct pf_rule		*tail;
1630		int			 rs_num;
1631
1632		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1633		ruleset = pf_find_ruleset(pr->anchor);
1634		if (ruleset == NULL) {
1635			error = EINVAL;
1636			break;
1637		}
1638		rs_num = pf_get_ruleset_number(pr->rule.action);
1639		if (rs_num >= PF_RULESET_MAX) {
1640			error = EINVAL;
1641			break;
1642		}
1643		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1644		    pf_rulequeue);
1645		if (tail)
1646			pr->nr = tail->nr + 1;
1647		else
1648			pr->nr = 0;
1649		pr->ticket = ruleset->rules[rs_num].active.ticket;
1650		break;
1651	}
1652
1653	case DIOCGETRULE: {
1654		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1655		struct pf_ruleset	*ruleset;
1656		struct pf_rule		*rule;
1657		int			 rs_num, i;
1658
1659		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1660		ruleset = pf_find_ruleset(pr->anchor);
1661		if (ruleset == NULL) {
1662			error = EINVAL;
1663			break;
1664		}
1665		rs_num = pf_get_ruleset_number(pr->rule.action);
1666		if (rs_num >= PF_RULESET_MAX) {
1667			error = EINVAL;
1668			break;
1669		}
1670		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1671			error = EBUSY;
1672			break;
1673		}
1674		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1675		while ((rule != NULL) && (rule->nr != pr->nr))
1676			rule = TAILQ_NEXT(rule, entries);
1677		if (rule == NULL) {
1678			error = EBUSY;
1679			break;
1680		}
1681		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1682		if (pf_anchor_copyout(ruleset, rule, pr)) {
1683			error = EBUSY;
1684			break;
1685		}
1686		pfi_dynaddr_copyout(&pr->rule.src.addr);
1687		pfi_dynaddr_copyout(&pr->rule.dst.addr);
1688		pf_tbladdr_copyout(&pr->rule.src.addr);
1689		pf_tbladdr_copyout(&pr->rule.dst.addr);
1690		pf_rtlabel_copyout(&pr->rule.src.addr);
1691		pf_rtlabel_copyout(&pr->rule.dst.addr);
1692		for (i = 0; i < PF_SKIP_COUNT; ++i)
1693			if (rule->skip[i].ptr == NULL)
1694				pr->rule.skip[i].nr = -1;
1695			else
1696				pr->rule.skip[i].nr =
1697				    rule->skip[i].ptr->nr;
1698
1699		if (pr->action == PF_GET_CLR_CNTR) {
1700			rule->evaluations = 0;
1701			rule->packets[0] = rule->packets[1] = 0;
1702			rule->bytes[0] = rule->bytes[1] = 0;
1703		}
1704		break;
1705	}
1706
1707	case DIOCCHANGERULE: {
1708		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1709		struct pf_ruleset	*ruleset;
1710		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1711		u_int32_t		 nr = 0;
1712		int			 rs_num;
1713
1714		if (!(pcr->action == PF_CHANGE_REMOVE ||
1715		    pcr->action == PF_CHANGE_GET_TICKET) &&
1716		    pcr->pool_ticket != ticket_pabuf) {
1717			error = EBUSY;
1718			break;
1719		}
1720
1721		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1722		    pcr->action > PF_CHANGE_GET_TICKET) {
1723			error = EINVAL;
1724			break;
1725		}
1726		ruleset = pf_find_ruleset(pcr->anchor);
1727		if (ruleset == NULL) {
1728			error = EINVAL;
1729			break;
1730		}
1731		rs_num = pf_get_ruleset_number(pcr->rule.action);
1732		if (rs_num >= PF_RULESET_MAX) {
1733			error = EINVAL;
1734			break;
1735		}
1736
1737		if (pcr->action == PF_CHANGE_GET_TICKET) {
1738			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1739			break;
1740		} else {
1741			if (pcr->ticket !=
1742			    ruleset->rules[rs_num].active.ticket) {
1743				error = EINVAL;
1744				break;
1745			}
1746			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1747				error = EINVAL;
1748				break;
1749			}
1750		}
1751
1752		if (pcr->action != PF_CHANGE_REMOVE) {
1753			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1754			if (newrule == NULL) {
1755				error = ENOMEM;
1756				break;
1757			}
1758			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1759#ifdef __FreeBSD__
1760			newrule->cuid = td->td_ucred->cr_ruid;
1761			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1762#else
1763			newrule->cuid = p->p_cred->p_ruid;
1764			newrule->cpid = p->p_pid;
1765#endif
1766			TAILQ_INIT(&newrule->rpool.list);
1767			/* initialize refcounting */
1768			newrule->states = 0;
1769			newrule->entries.tqe_prev = NULL;
1770#ifndef INET
1771			if (newrule->af == AF_INET) {
1772				pool_put(&pf_rule_pl, newrule);
1773				error = EAFNOSUPPORT;
1774				break;
1775			}
1776#endif /* INET */
1777#ifndef INET6
1778			if (newrule->af == AF_INET6) {
1779				pool_put(&pf_rule_pl, newrule);
1780				error = EAFNOSUPPORT;
1781				break;
1782			}
1783#endif /* INET6 */
1784			if (newrule->ifname[0]) {
1785				newrule->kif = pfi_kif_get(newrule->ifname);
1786				if (newrule->kif == NULL) {
1787					pool_put(&pf_rule_pl, newrule);
1788					error = EINVAL;
1789					break;
1790				}
1791				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1792			} else
1793				newrule->kif = NULL;
1794
1795			if (newrule->rtableid > 0 &&
1796#ifdef __FreeBSD__ /* ROUTING */
1797			    newrule->rtableid > rt_numfibs)
1798#else
1799			    !rtable_exists(newrule->rtableid))
1800#endif
1801				error = EBUSY;
1802
1803#ifdef ALTQ
1804			/* set queue IDs */
1805			if (newrule->qname[0] != 0) {
1806				if ((newrule->qid =
1807				    pf_qname2qid(newrule->qname)) == 0)
1808					error = EBUSY;
1809				else if (newrule->pqname[0] != 0) {
1810					if ((newrule->pqid =
1811					    pf_qname2qid(newrule->pqname)) == 0)
1812						error = EBUSY;
1813				} else
1814					newrule->pqid = newrule->qid;
1815			}
1816#endif /* ALTQ */
1817			if (newrule->tagname[0])
1818				if ((newrule->tag =
1819				    pf_tagname2tag(newrule->tagname)) == 0)
1820					error = EBUSY;
1821			if (newrule->match_tagname[0])
1822				if ((newrule->match_tag = pf_tagname2tag(
1823				    newrule->match_tagname)) == 0)
1824					error = EBUSY;
1825			if (newrule->rt && !newrule->direction)
1826				error = EINVAL;
1827#ifdef __FreeBSD__
1828#if NPFLOG > 0
1829			if (!newrule->log)
1830				newrule->logif = 0;
1831			if (newrule->logif >= PFLOGIFS_MAX)
1832				error = EINVAL;
1833#endif
1834#endif
1835			if (pf_rtlabel_add(&newrule->src.addr) ||
1836			    pf_rtlabel_add(&newrule->dst.addr))
1837				error = EBUSY;
1838			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1839				error = EINVAL;
1840			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1841				error = EINVAL;
1842			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1843				error = EINVAL;
1844			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1845				error = EINVAL;
1846			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1847				error = EINVAL;
1848			TAILQ_FOREACH(pa, &pf_pabuf, entries)
1849				if (pf_tbladdr_setup(ruleset, &pa->addr))
1850					error = EINVAL;
1851
1852			if (newrule->overload_tblname[0]) {
1853				if ((newrule->overload_tbl = pfr_attach_table(
1854				    ruleset, newrule->overload_tblname)) ==
1855				    NULL)
1856					error = EINVAL;
1857				else
1858					newrule->overload_tbl->pfrkt_flags |=
1859					    PFR_TFLAG_ACTIVE;
1860			}
1861
1862			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1863			if (((((newrule->action == PF_NAT) ||
1864			    (newrule->action == PF_RDR) ||
1865			    (newrule->action == PF_BINAT) ||
1866			    (newrule->rt > PF_FASTROUTE)) &&
1867			    !newrule->anchor)) &&
1868			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1869				error = EINVAL;
1870
1871			if (error) {
1872				pf_rm_rule(NULL, newrule);
1873				break;
1874			}
1875
1876#ifdef __FreeBSD__
1877			if (!debug_pfugidhack && (newrule->uid.op ||
1878			    newrule->gid.op ||
1879			    newrule->log & PF_LOG_SOCKET_LOOKUP)) {
1880				DPFPRINTF(PF_DEBUG_MISC,
1881				    ("pf: debug.pfugidhack enabled\n"));
1882				debug_pfugidhack = 1;
1883			}
1884#endif
1885
1886			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1887			newrule->evaluations = 0;
1888			newrule->packets[0] = newrule->packets[1] = 0;
1889			newrule->bytes[0] = newrule->bytes[1] = 0;
1890		}
1891		pf_empty_pool(&pf_pabuf);
1892
1893		if (pcr->action == PF_CHANGE_ADD_HEAD)
1894			oldrule = TAILQ_FIRST(
1895			    ruleset->rules[rs_num].active.ptr);
1896		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1897			oldrule = TAILQ_LAST(
1898			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1899		else {
1900			oldrule = TAILQ_FIRST(
1901			    ruleset->rules[rs_num].active.ptr);
1902			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1903				oldrule = TAILQ_NEXT(oldrule, entries);
1904			if (oldrule == NULL) {
1905				if (newrule != NULL)
1906					pf_rm_rule(NULL, newrule);
1907				error = EINVAL;
1908				break;
1909			}
1910		}
1911
1912		if (pcr->action == PF_CHANGE_REMOVE) {
1913			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1914			ruleset->rules[rs_num].active.rcount--;
1915		} else {
1916			if (oldrule == NULL)
1917				TAILQ_INSERT_TAIL(
1918				    ruleset->rules[rs_num].active.ptr,
1919				    newrule, entries);
1920			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1921			    pcr->action == PF_CHANGE_ADD_BEFORE)
1922				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1923			else
1924				TAILQ_INSERT_AFTER(
1925				    ruleset->rules[rs_num].active.ptr,
1926				    oldrule, newrule, entries);
1927			ruleset->rules[rs_num].active.rcount++;
1928		}
1929
1930		nr = 0;
1931		TAILQ_FOREACH(oldrule,
1932		    ruleset->rules[rs_num].active.ptr, entries)
1933			oldrule->nr = nr++;
1934
1935		ruleset->rules[rs_num].active.ticket++;
1936
1937		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1938		pf_remove_if_empty_ruleset(ruleset);
1939
1940		break;
1941	}
1942
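	/*
	 * DIOCCLRSTATES and DIOCKILLSTATES below both walk the state
	 * tree and unlink matching states; the former matches on the
	 * interface name only, the latter also on address family,
	 * protocol, addresses and ports.  The number of killed states
	 * is returned in psk_af.
	 */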
1943	case DIOCCLRSTATES: {
1944		struct pf_state		*state, *nexts;
1945		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1946		int			 killed = 0;
1947
1948		for (state = RB_MIN(pf_state_tree_id, &tree_id); state;
1949		    state = nexts) {
1950			nexts = RB_NEXT(pf_state_tree_id, &tree_id, state);
1951
1952			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1953			    state->u.s.kif->pfik_name)) {
1954#if NPFSYNC
1955				/* don't send out individual delete messages */
1956				state->sync_flags = PFSTATE_NOSYNC;
1957#endif
1958				pf_unlink_state(state);
1959				killed++;
1960			}
1961		}
1962		psk->psk_af = killed;
1963#if NPFSYNC
1964		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1965#endif
1966		break;
1967	}
1968
1969	case DIOCKILLSTATES: {
1970		struct pf_state		*state, *nexts;
1971		struct pf_state_host	*src, *dst;
1972		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1973		int			 killed = 0;
1974
1975		for (state = RB_MIN(pf_state_tree_id, &tree_id); state;
1976		    state = nexts) {
1977			nexts = RB_NEXT(pf_state_tree_id, &tree_id, state);
1978
1979			if (state->direction == PF_OUT) {
1980				src = &state->lan;
1981				dst = &state->ext;
1982			} else {
1983				src = &state->ext;
1984				dst = &state->lan;
1985			}
1986			if ((!psk->psk_af || state->af == psk->psk_af)
1987			    && (!psk->psk_proto || psk->psk_proto ==
1988			    state->proto) &&
1989			    PF_MATCHA(psk->psk_src.neg,
1990			    &psk->psk_src.addr.v.a.addr,
1991			    &psk->psk_src.addr.v.a.mask,
1992			    &src->addr, state->af) &&
1993			    PF_MATCHA(psk->psk_dst.neg,
1994			    &psk->psk_dst.addr.v.a.addr,
1995			    &psk->psk_dst.addr.v.a.mask,
1996			    &dst->addr, state->af) &&
1997			    (psk->psk_src.port_op == 0 ||
1998			    pf_match_port(psk->psk_src.port_op,
1999			    psk->psk_src.port[0], psk->psk_src.port[1],
2000			    src->port)) &&
2001			    (psk->psk_dst.port_op == 0 ||
2002			    pf_match_port(psk->psk_dst.port_op,
2003			    psk->psk_dst.port[0], psk->psk_dst.port[1],
2004			    dst->port)) &&
2005			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
2006			    state->u.s.kif->pfik_name))) {
2007#if NPFSYNC > 0
2008				/* send immediate delete of state */
2009				pfsync_delete_state(state);
2010				state->sync_flags |= PFSTATE_NOSYNC;
2011#endif
2012				pf_unlink_state(state);
2013				killed++;
2014			}
2015		}
2016		psk->psk_af = killed;
2017		break;
2018	}
2019
2020	case DIOCADDSTATE: {
2021		struct pfioc_state	*ps = (struct pfioc_state *)addr;
2022		struct pf_state		*state;
2023		struct pfi_kif		*kif;
2024
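		/*
		 * Create a new state from the userland-supplied template;
		 * kernel-only pointers and counters are reset below before
		 * the state is inserted.
		 */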
2025		if (ps->state.timeout >= PFTM_MAX &&
2026		    ps->state.timeout != PFTM_UNTIL_PACKET) {
2027			error = EINVAL;
2028			break;
2029		}
2030		state = pool_get(&pf_state_pl, PR_NOWAIT);
2031		if (state == NULL) {
2032			error = ENOMEM;
2033			break;
2034		}
2035		kif = pfi_kif_get(ps->state.u.ifname);
2036		if (kif == NULL) {
2037			pool_put(&pf_state_pl, state);
2038			error = ENOENT;
2039			break;
2040		}
2041		bcopy(&ps->state, state, sizeof(struct pf_state));
2042		bzero(&state->u, sizeof(state->u));
2043		state->rule.ptr = &pf_default_rule;
2044		state->nat_rule.ptr = NULL;
2045		state->anchor.ptr = NULL;
2046		state->rt_kif = NULL;
2047		state->creation = time_second;
2048		state->pfsync_time = 0;
2049		state->packets[0] = state->packets[1] = 0;
2050		state->bytes[0] = state->bytes[1] = 0;
2051
2052		if (pf_insert_state(kif, state)) {
2053			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
2054			pool_put(&pf_state_pl, state);
2055			error = ENOMEM;
2056		}
2057		break;
2058	}
2059
2060	case DIOCGETSTATE: {
2061		struct pfioc_state	*ps = (struct pfioc_state *)addr;
2062		struct pf_state		*state;
2063		u_int32_t		 nr;
2064		int			 secs;
2065
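		/*
		 * Walk the state id tree to the ps->nr'th entry; EBUSY is
		 * returned if that index no longer exists.
		 */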
2066		nr = 0;
2067		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2068			if (nr >= ps->nr)
2069				break;
2070			nr++;
2071		}
2072		if (state == NULL) {
2073			error = EBUSY;
2074			break;
2075		}
2076		secs = time_second;
2077		bcopy(state, &ps->state, sizeof(ps->state));
2078		strlcpy(ps->state.u.ifname, state->u.s.kif->pfik_name,
2079		    sizeof(ps->state.u.ifname));
2080		ps->state.rule.nr = state->rule.ptr->nr;
2081		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
2082		    -1 : state->nat_rule.ptr->nr;
2083		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
2084		    -1 : state->anchor.ptr->nr;
2085		ps->state.creation = secs - ps->state.creation;
2086		ps->state.expire = pf_state_expires(state);
2087		if (ps->state.expire > secs)
2088			ps->state.expire -= secs;
2089		else
2090			ps->state.expire = 0;
2091		break;
2092	}
2093
2094	case DIOCGETSTATES: {
2095		struct pfioc_states	*ps = (struct pfioc_states *)addr;
2096		struct pf_state		*state;
2097		struct pf_state		*p, *pstore;
2098		u_int32_t		 nr = 0;
2099		int			 space = ps->ps_len;
2100
2101		if (space == 0) {
2102			nr = pf_status.states;
2103			ps->ps_len = sizeof(struct pf_state) * nr;
2104			break;
2105		}
2106
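		/*
		 * On FreeBSD the pf lock is dropped around the M_WAITOK
		 * allocation, since it may sleep.
		 */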
2107#ifdef __FreeBSD__
2108		PF_UNLOCK();
2109#endif
2110		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2111#ifdef __FreeBSD__
2112		PF_LOCK();
2113#endif
2114
2115		p = ps->ps_states;
2116
2117		state = TAILQ_FIRST(&state_list);
2118		while (state) {
2119			if (state->timeout != PFTM_UNLINKED) {
2120				int	secs = time_second;
2121
2122				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2123					break;
2124
2125				bcopy(state, pstore, sizeof(*pstore));
2126				strlcpy(pstore->u.ifname,
2127				    state->u.s.kif->pfik_name,
2128				    sizeof(pstore->u.ifname));
2129				pstore->rule.nr = state->rule.ptr->nr;
2130				pstore->nat_rule.nr = (state->nat_rule.ptr ==
2131				    NULL) ? -1 : state->nat_rule.ptr->nr;
2132				pstore->anchor.nr = (state->anchor.ptr ==
2133				    NULL) ? -1 : state->anchor.ptr->nr;
2134				pstore->creation = secs - pstore->creation;
2135				pstore->expire = pf_state_expires(state);
2136				if (pstore->expire > secs)
2137					pstore->expire -= secs;
2138				else
2139					pstore->expire = 0;
2140#ifdef __FreeBSD__
2141				PF_COPYOUT(pstore, p, sizeof(*p), error);
2142#else
2143				error = copyout(pstore, p, sizeof(*p));
2144#endif
2145				if (error) {
2146					free(pstore, M_TEMP);
2147					goto fail;
2148				}
2149				p++;
2150				nr++;
2151			}
2152			state = TAILQ_NEXT(state, u.s.entry_list);
2153		}
2154
2155		ps->ps_len = sizeof(struct pf_state) * nr;
2156
2157		free(pstore, M_TEMP);
2158		break;
2159	}
2160
2161	case DIOCGETSTATUS: {
2162		struct pf_status *s = (struct pf_status *)addr;
2163		bcopy(&pf_status, s, sizeof(struct pf_status));
2164		pfi_fill_oldstatus(s);
2165		break;
2166	}
2167
2168	case DIOCSETSTATUSIF: {
2169		struct pfioc_if	*pi = (struct pfioc_if *)addr;
2170
2171		if (pi->ifname[0] == 0) {
2172			bzero(pf_status.ifname, IFNAMSIZ);
2173			break;
2174		}
2175		if (ifunit(pi->ifname) == NULL) {
2176			error = EINVAL;
2177			break;
2178		}
2179		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2180		break;
2181	}
2182
2183	case DIOCCLRSTATUS: {
2184		bzero(pf_status.counters, sizeof(pf_status.counters));
2185		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2186		bzero(pf_status.scounters, sizeof(pf_status.scounters));
2187		pf_status.since = time_second;
2188		if (*pf_status.ifname)
2189			pfi_clr_istats(pf_status.ifname);
2190		break;
2191	}
2192
2193	case DIOCNATLOOK: {
2194		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
2195		struct pf_state		*state;
2196		struct pf_state_cmp	 key;
2197		int			 m = 0, direction = pnl->direction;
2198
2199		key.af = pnl->af;
2200		key.proto = pnl->proto;
2201
2202		if (!pnl->proto ||
2203		    PF_AZERO(&pnl->saddr, pnl->af) ||
2204		    PF_AZERO(&pnl->daddr, pnl->af) ||
2205		    ((pnl->proto == IPPROTO_TCP ||
2206		    pnl->proto == IPPROTO_UDP) &&
2207		    (!pnl->dport || !pnl->sport)))
2208			error = EINVAL;
2209		else {
2210			/*
2211			 * userland gives us source and dest of connection,
2212			 * reverse the lookup so we ask for what happens with
2213			 * the return traffic, enabling us to find it in the
2214			 * state tree.
2215			 */
2216			if (direction == PF_IN) {
2217				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
2218				key.ext.port = pnl->dport;
2219				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
2220				key.gwy.port = pnl->sport;
2221				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
2222			} else {
2223				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
2224				key.lan.port = pnl->dport;
2225				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
2226				key.ext.port = pnl->sport;
2227				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
2228			}
2229			if (m > 1)
2230				error = E2BIG;	/* more than one state */
2231			else if (state != NULL) {
2232				if (direction == PF_IN) {
2233					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
2234					    state->af);
2235					pnl->rsport = state->lan.port;
2236					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2237					    pnl->af);
2238					pnl->rdport = pnl->dport;
2239				} else {
2240					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
2241					    state->af);
2242					pnl->rdport = state->gwy.port;
2243					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2244					    pnl->af);
2245					pnl->rsport = pnl->sport;
2246				}
2247			} else
2248				error = ENOENT;
2249		}
2250		break;
2251	}
2252
2253	case DIOCSETTIMEOUT: {
2254		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
2255		int		 old;
2256
2257		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2258		    pt->seconds < 0) {
2259			error = EINVAL;
2260			goto fail;
2261		}
2262		old = pf_default_rule.timeout[pt->timeout];
2263		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2264			pt->seconds = 1;
2265		pf_default_rule.timeout[pt->timeout] = pt->seconds;
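		/*
		 * If the purge interval was shortened, wake the purge
		 * thread so the new value takes effect right away.
		 */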
2266		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2267			wakeup(pf_purge_thread);
2268		pt->seconds = old;
2269		break;
2270	}
2271
2272	case DIOCGETTIMEOUT: {
2273		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
2274
2275		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2276			error = EINVAL;
2277			goto fail;
2278		}
2279		pt->seconds = pf_default_rule.timeout[pt->timeout];
2280		break;
2281	}
2282
2283	case DIOCGETLIMIT: {
2284		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
2285
2286		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2287			error = EINVAL;
2288			goto fail;
2289		}
2290		pl->limit = pf_pool_limits[pl->index].limit;
2291		break;
2292	}
2293
2294	case DIOCSETLIMIT: {
2295		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
2296		int			 old_limit;
2297
2298		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2299		    pf_pool_limits[pl->index].pp == NULL) {
2300			error = EINVAL;
2301			goto fail;
2302		}
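		/*
		 * Apply the new hard limit to the backing allocator:
		 * a UMA zone maximum on FreeBSD, a pool hard limit on
		 * OpenBSD.
		 */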
2303#ifdef __FreeBSD__
2304		uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
2305#else
2306		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2307		    pl->limit, NULL, 0) != 0) {
2308			error = EBUSY;
2309			goto fail;
2310		}
2311#endif
2312		old_limit = pf_pool_limits[pl->index].limit;
2313		pf_pool_limits[pl->index].limit = pl->limit;
2314		pl->limit = old_limit;
2315		break;
2316	}
2317
2318	case DIOCSETDEBUG: {
2319		u_int32_t	*level = (u_int32_t *)addr;
2320
2321		pf_status.debug = *level;
2322		break;
2323	}
2324
2325	case DIOCCLRRULECTRS: {
2326		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2327		struct pf_ruleset	*ruleset = &pf_main_ruleset;
2328		struct pf_rule		*rule;
2329
2330		TAILQ_FOREACH(rule,
2331		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2332			rule->evaluations = 0;
2333			rule->packets[0] = rule->packets[1] = 0;
2334			rule->bytes[0] = rule->bytes[1] = 0;
2335		}
2336		break;
2337	}
2338
2339#ifdef __FreeBSD__
2340	case DIOCGIFSPEED: {
2341		struct pf_ifspeed	*psp = (struct pf_ifspeed *)addr;
2342		struct pf_ifspeed	ps;
2343		struct ifnet		*ifp;
2344
2345		if (psp->ifname[0] != 0) {
2346			/* Can we completely trust user-land? */
2347			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2348			ifp = ifunit(ps.ifname);
2349			if (ifp != NULL)
2350				psp->baudrate = ifp->if_baudrate;
2351			else
2352				error = EINVAL;
2353		} else
2354			error = EINVAL;
2355		break;
2356	}
2357#endif /* __FreeBSD__ */
2358
2359#ifdef ALTQ
2360	case DIOCSTARTALTQ: {
2361		struct pf_altq		*altq;
2362
2363		/* enable all altq interfaces on active list */
2364		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2365#ifdef __FreeBSD__
2366			if (altq->qname[0] == 0 && (altq->local_flags &
2367			    PFALTQ_FLAG_IF_REMOVED) == 0) {
2368#else
2369			if (altq->qname[0] == 0) {
2370#endif
2371				error = pf_enable_altq(altq);
2372				if (error != 0)
2373					break;
2374			}
2375		}
2376		if (error == 0)
2377			pf_altq_running = 1;
2378		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2379		break;
2380	}
2381
2382	case DIOCSTOPALTQ: {
2383		struct pf_altq		*altq;
2384
2385		/* disable all altq interfaces on active list */
2386		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2387#ifdef __FreeBSD__
2388			if (altq->qname[0] == 0 && (altq->local_flags &
2389			    PFALTQ_FLAG_IF_REMOVED) == 0) {
2390#else
2391			if (altq->qname[0] == 0) {
2392#endif
2393				error = pf_disable_altq(altq);
2394				if (error != 0)
2395					break;
2396			}
2397		}
2398		if (error == 0)
2399			pf_altq_running = 0;
2400		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2401		break;
2402	}
2403
2404	case DIOCADDALTQ: {
2405		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2406		struct pf_altq		*altq, *a;
2407
2408		if (pa->ticket != ticket_altqs_inactive) {
2409			error = EBUSY;
2410			break;
2411		}
2412		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2413		if (altq == NULL) {
2414			error = ENOMEM;
2415			break;
2416		}
2417		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2418#ifdef __FreeBSD__
2419		altq->local_flags = 0;
2420#endif
2421
2422		/*
2423		 * if this is for a queue, find the discipline and
2424		 * copy the necessary fields
2425		 */
2426		if (altq->qname[0] != 0) {
2427			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2428				error = EBUSY;
2429				pool_put(&pf_altq_pl, altq);
2430				break;
2431			}
2432			altq->altq_disc = NULL;
2433			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2434				if (strncmp(a->ifname, altq->ifname,
2435				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
2436					altq->altq_disc = a->altq_disc;
2437					break;
2438				}
2439			}
2440		}
2441
2442#ifdef __FreeBSD__
2443		struct ifnet *ifp;
2444
2445		if ((ifp = ifunit(altq->ifname)) == NULL) {
2446			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2447		} else {
2448			PF_UNLOCK();
2449#endif
2450		error = altq_add(altq);
2451#ifdef __FreeBSD__
2452			PF_LOCK();
2453		}
2454#endif
2455		if (error) {
2456			pool_put(&pf_altq_pl, altq);
2457			break;
2458		}
2459
2460		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2461		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2462		break;
2463	}
2464
2465	case DIOCGETALTQS: {
2466		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2467		struct pf_altq		*altq;
2468
2469		pa->nr = 0;
2470		TAILQ_FOREACH(altq, pf_altqs_active, entries)
2471			pa->nr++;
2472		pa->ticket = ticket_altqs_active;
2473		break;
2474	}
2475
2476	case DIOCGETALTQ: {
2477		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2478		struct pf_altq		*altq;
2479		u_int32_t		 nr;
2480
2481		if (pa->ticket != ticket_altqs_active) {
2482			error = EBUSY;
2483			break;
2484		}
2485		nr = 0;
2486		altq = TAILQ_FIRST(pf_altqs_active);
2487		while ((altq != NULL) && (nr < pa->nr)) {
2488			altq = TAILQ_NEXT(altq, entries);
2489			nr++;
2490		}
2491		if (altq == NULL) {
2492			error = EBUSY;
2493			break;
2494		}
2495		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2496		break;
2497	}
2498
2499	case DIOCCHANGEALTQ:
2500		/* CHANGEALTQ not supported yet! */
2501		error = ENODEV;
2502		break;
2503
2504	case DIOCGETQSTATS: {
2505		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
2506		struct pf_altq		*altq;
2507		u_int32_t		 nr;
2508		int			 nbytes;
2509
2510		if (pq->ticket != ticket_altqs_active) {
2511			error = EBUSY;
2512			break;
2513		}
2514		nbytes = pq->nbytes;
2515		nr = 0;
2516		altq = TAILQ_FIRST(pf_altqs_active);
2517		while ((altq != NULL) && (nr < pq->nr)) {
2518			altq = TAILQ_NEXT(altq, entries);
2519			nr++;
2520		}
2521		if (altq == NULL) {
2522			error = EBUSY;
2523			break;
2524		}
2525#ifdef __FreeBSD__
2526		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2527			error = ENXIO;
2528			break;
2529		}
2530		PF_UNLOCK();
2531#endif
2532		error = altq_getqstats(altq, pq->buf, &nbytes);
2533#ifdef __FreeBSD__
2534		PF_LOCK();
2535#endif
2536		if (error == 0) {
2537			pq->scheduler = altq->scheduler;
2538			pq->nbytes = nbytes;
2539		}
2540		break;
2541	}
2542#endif /* ALTQ */
2543
2544	case DIOCBEGINADDRS: {
2545		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2546
2547		pf_empty_pool(&pf_pabuf);
2548		pp->ticket = ++ticket_pabuf;
2549		break;
2550	}
2551
2552	case DIOCADDADDR: {
2553		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2554
2555		if (pp->ticket != ticket_pabuf) {
2556			error = EBUSY;
2557			break;
2558		}
2559#ifndef INET
2560		if (pp->af == AF_INET) {
2561			error = EAFNOSUPPORT;
2562			break;
2563		}
2564#endif /* INET */
2565#ifndef INET6
2566		if (pp->af == AF_INET6) {
2567			error = EAFNOSUPPORT;
2568			break;
2569		}
2570#endif /* INET6 */
2571		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2572		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2573		    pp->addr.addr.type != PF_ADDR_TABLE) {
2574			error = EINVAL;
2575			break;
2576		}
2577		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2578		if (pa == NULL) {
2579			error = ENOMEM;
2580			break;
2581		}
2582		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2583		if (pa->ifname[0]) {
2584			pa->kif = pfi_kif_get(pa->ifname);
2585			if (pa->kif == NULL) {
2586				pool_put(&pf_pooladdr_pl, pa);
2587				error = EINVAL;
2588				break;
2589			}
2590			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2591		}
2592		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2593			pfi_dynaddr_remove(&pa->addr);
2594			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2595			pool_put(&pf_pooladdr_pl, pa);
2596			error = EINVAL;
2597			break;
2598		}
2599		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2600		break;
2601	}
2602
2603	case DIOCGETADDRS: {
2604		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2605
2606		pp->nr = 0;
2607		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2608		    pp->r_num, 0, 1, 0);
2609		if (pool == NULL) {
2610			error = EBUSY;
2611			break;
2612		}
2613		TAILQ_FOREACH(pa, &pool->list, entries)
2614			pp->nr++;
2615		break;
2616	}
2617
2618	case DIOCGETADDR: {
2619		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2620		u_int32_t		 nr = 0;
2621
2622		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2623		    pp->r_num, 0, 1, 1);
2624		if (pool == NULL) {
2625			error = EBUSY;
2626			break;
2627		}
2628		pa = TAILQ_FIRST(&pool->list);
2629		while ((pa != NULL) && (nr < pp->nr)) {
2630			pa = TAILQ_NEXT(pa, entries);
2631			nr++;
2632		}
2633		if (pa == NULL) {
2634			error = EBUSY;
2635			break;
2636		}
2637		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2638		pfi_dynaddr_copyout(&pp->addr.addr);
2639		pf_tbladdr_copyout(&pp->addr.addr);
2640		pf_rtlabel_copyout(&pp->addr.addr);
2641		break;
2642	}
2643
2644	case DIOCCHANGEADDR: {
2645		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
2646		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
2647		struct pf_ruleset	*ruleset;
2648
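		/*
		 * Add, replace or remove a single address in an existing
		 * rule's address pool, depending on pca->action.
		 */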
2649		if (pca->action < PF_CHANGE_ADD_HEAD ||
2650		    pca->action > PF_CHANGE_REMOVE) {
2651			error = EINVAL;
2652			break;
2653		}
2654		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2655		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2656		    pca->addr.addr.type != PF_ADDR_TABLE) {
2657			error = EINVAL;
2658			break;
2659		}
2660
2661		ruleset = pf_find_ruleset(pca->anchor);
2662		if (ruleset == NULL) {
2663			error = EBUSY;
2664			break;
2665		}
2666		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2667		    pca->r_num, pca->r_last, 1, 1);
2668		if (pool == NULL) {
2669			error = EBUSY;
2670			break;
2671		}
2672		if (pca->action != PF_CHANGE_REMOVE) {
2673			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2674			if (newpa == NULL) {
2675				error = ENOMEM;
2676				break;
2677			}
2678			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2679#ifndef INET
2680			if (pca->af == AF_INET) {
2681				pool_put(&pf_pooladdr_pl, newpa);
2682				error = EAFNOSUPPORT;
2683				break;
2684			}
2685#endif /* INET */
2686#ifndef INET6
2687			if (pca->af == AF_INET6) {
2688				pool_put(&pf_pooladdr_pl, newpa);
2689				error = EAFNOSUPPORT;
2690				break;
2691			}
2692#endif /* INET6 */
2693			if (newpa->ifname[0]) {
2694				newpa->kif = pfi_kif_get(newpa->ifname);
2695				if (newpa->kif == NULL) {
2696					pool_put(&pf_pooladdr_pl, newpa);
2697					error = EINVAL;
2698					break;
2699				}
2700				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2701			} else
2702				newpa->kif = NULL;
2703			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2704			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
2705				pfi_dynaddr_remove(&newpa->addr);
2706				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2707				pool_put(&pf_pooladdr_pl, newpa);
2708				error = EINVAL;
2709				break;
2710			}
2711		}
2712
2713		if (pca->action == PF_CHANGE_ADD_HEAD)
2714			oldpa = TAILQ_FIRST(&pool->list);
2715		else if (pca->action == PF_CHANGE_ADD_TAIL)
2716			oldpa = TAILQ_LAST(&pool->list, pf_palist);
2717		else {
2718			int	i = 0;
2719
2720			oldpa = TAILQ_FIRST(&pool->list);
2721			while ((oldpa != NULL) && (i < pca->nr)) {
2722				oldpa = TAILQ_NEXT(oldpa, entries);
2723				i++;
2724			}
2725			if (oldpa == NULL) {
2726				error = EINVAL;
2727				break;
2728			}
2729		}
2730
2731		if (pca->action == PF_CHANGE_REMOVE) {
2732			TAILQ_REMOVE(&pool->list, oldpa, entries);
2733			pfi_dynaddr_remove(&oldpa->addr);
2734			pf_tbladdr_remove(&oldpa->addr);
2735			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2736			pool_put(&pf_pooladdr_pl, oldpa);
2737		} else {
2738			if (oldpa == NULL)
2739				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2740			else if (pca->action == PF_CHANGE_ADD_HEAD ||
2741			    pca->action == PF_CHANGE_ADD_BEFORE)
2742				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2743			else
2744				TAILQ_INSERT_AFTER(&pool->list, oldpa,
2745				    newpa, entries);
2746		}
2747
2748		pool->cur = TAILQ_FIRST(&pool->list);
2749		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2750		    pca->af);
2751		break;
2752	}
2753
2754	case DIOCGETRULESETS: {
2755		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2756		struct pf_ruleset	*ruleset;
2757		struct pf_anchor	*anchor;
2758
2759		pr->path[sizeof(pr->path) - 1] = 0;
2760		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2761			error = EINVAL;
2762			break;
2763		}
2764		pr->nr = 0;
2765		if (ruleset->anchor == NULL) {
2766			/* XXX kludge for pf_main_ruleset */
2767			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2768				if (anchor->parent == NULL)
2769					pr->nr++;
2770		} else {
2771			RB_FOREACH(anchor, pf_anchor_node,
2772			    &ruleset->anchor->children)
2773				pr->nr++;
2774		}
2775		break;
2776	}
2777
2778	case DIOCGETRULESET: {
2779		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2780		struct pf_ruleset	*ruleset;
2781		struct pf_anchor	*anchor;
2782		u_int32_t		 nr = 0;
2783
2784		pr->path[sizeof(pr->path) - 1] = 0;
2785		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2786			error = EINVAL;
2787			break;
2788		}
2789		pr->name[0] = 0;
2790		if (ruleset->anchor == NULL) {
2791			/* XXX kludge for pf_main_ruleset */
2792			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2793				if (anchor->parent == NULL && nr++ == pr->nr) {
2794					strlcpy(pr->name, anchor->name,
2795					    sizeof(pr->name));
2796					break;
2797				}
2798		} else {
2799			RB_FOREACH(anchor, pf_anchor_node,
2800			    &ruleset->anchor->children)
2801				if (nr++ == pr->nr) {
2802					strlcpy(pr->name, anchor->name,
2803					    sizeof(pr->name));
2804					break;
2805				}
2806		}
2807		if (!pr->name[0])
2808			error = EBUSY;
2809		break;
2810	}
2811
2812	case DIOCRCLRTABLES: {
2813		struct pfioc_table *io = (struct pfioc_table *)addr;
2814
2815		if (io->pfrio_esize != 0) {
2816			error = ENODEV;
2817			break;
2818		}
2819		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2820		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2821		break;
2822	}
2823
2824	case DIOCRADDTABLES: {
2825		struct pfioc_table *io = (struct pfioc_table *)addr;
2826
2827		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2828			error = ENODEV;
2829			break;
2830		}
2831		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2832		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2833		break;
2834	}
2835
2836	case DIOCRDELTABLES: {
2837		struct pfioc_table *io = (struct pfioc_table *)addr;
2838
2839		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2840			error = ENODEV;
2841			break;
2842		}
2843		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2844		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2845		break;
2846	}
2847
2848	case DIOCRGETTABLES: {
2849		struct pfioc_table *io = (struct pfioc_table *)addr;
2850
2851		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2852			error = ENODEV;
2853			break;
2854		}
2855		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2856		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2857		break;
2858	}
2859
2860	case DIOCRGETTSTATS: {
2861		struct pfioc_table *io = (struct pfioc_table *)addr;
2862
2863		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2864			error = ENODEV;
2865			break;
2866		}
2867		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2868		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2869		break;
2870	}
2871
2872	case DIOCRCLRTSTATS: {
2873		struct pfioc_table *io = (struct pfioc_table *)addr;
2874
2875		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2876			error = ENODEV;
2877			break;
2878		}
2879		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2880		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2881		break;
2882	}
2883
2884	case DIOCRSETTFLAGS: {
2885		struct pfioc_table *io = (struct pfioc_table *)addr;
2886
2887		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2888			error = ENODEV;
2889			break;
2890		}
2891		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2892		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2893		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2894		break;
2895	}
2896
2897	case DIOCRCLRADDRS: {
2898		struct pfioc_table *io = (struct pfioc_table *)addr;
2899
2900		if (io->pfrio_esize != 0) {
2901			error = ENODEV;
2902			break;
2903		}
2904		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2905		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2906		break;
2907	}
2908
2909	case DIOCRADDADDRS: {
2910		struct pfioc_table *io = (struct pfioc_table *)addr;
2911
2912		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2913			error = ENODEV;
2914			break;
2915		}
2916		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2917		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2918		    PFR_FLAG_USERIOCTL);
2919		break;
2920	}
2921
2922	case DIOCRDELADDRS: {
2923		struct pfioc_table *io = (struct pfioc_table *)addr;
2924
2925		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2926			error = ENODEV;
2927			break;
2928		}
2929		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2930		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2931		    PFR_FLAG_USERIOCTL);
2932		break;
2933	}
2934
2935	case DIOCRSETADDRS: {
2936		struct pfioc_table *io = (struct pfioc_table *)addr;
2937
2938		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2939			error = ENODEV;
2940			break;
2941		}
2942		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2943		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2944		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2945		    PFR_FLAG_USERIOCTL, 0);
2946		break;
2947	}
2948
2949	case DIOCRGETADDRS: {
2950		struct pfioc_table *io = (struct pfioc_table *)addr;
2951
2952		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2953			error = ENODEV;
2954			break;
2955		}
2956		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2957		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2958		break;
2959	}
2960
2961	case DIOCRGETASTATS: {
2962		struct pfioc_table *io = (struct pfioc_table *)addr;
2963
2964		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2965			error = ENODEV;
2966			break;
2967		}
2968		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2969		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2970		break;
2971	}
2972
2973	case DIOCRCLRASTATS: {
2974		struct pfioc_table *io = (struct pfioc_table *)addr;
2975
2976		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2977			error = ENODEV;
2978			break;
2979		}
2980		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2981		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2982		    PFR_FLAG_USERIOCTL);
2983		break;
2984	}
2985
2986	case DIOCRTSTADDRS: {
2987		struct pfioc_table *io = (struct pfioc_table *)addr;
2988
2989		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2990			error = ENODEV;
2991			break;
2992		}
2993		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2994		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2995		    PFR_FLAG_USERIOCTL);
2996		break;
2997	}
2998
2999	case DIOCRINADEFINE: {
3000		struct pfioc_table *io = (struct pfioc_table *)addr;
3001
3002		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3003			error = ENODEV;
3004			break;
3005		}
3006		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
3007		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3008		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3009		break;
3010	}
3011
3012	case DIOCOSFPADD: {
3013		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3014		error = pf_osfp_add(io);
3015		break;
3016	}
3017
3018	case DIOCOSFPGET: {
3019		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3020		error = pf_osfp_get(io);
3021		break;
3022	}
3023
3024	case DIOCXBEGIN: {
3025		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
3026		struct pfioc_trans_e	*ioe;
3027		struct pfr_table	*table;
3028		int			 i;
3029
3030		if (io->esize != sizeof(*ioe)) {
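		/*
		 * For every element of the transaction, open an inactive
		 * copy of the referenced ruleset, table or altq list and
		 * hand the resulting ticket back to userland.
		 */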
3031			error = ENODEV;
3032			goto fail;
3033		}
3034#ifdef __FreeBSD__
3035		PF_UNLOCK();
3036#endif
3037		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
3038		    M_TEMP, M_WAITOK);
3039		table = (struct pfr_table *)malloc(sizeof(*table),
3040		    M_TEMP, M_WAITOK);
3041#ifdef __FreeBSD__
3042		PF_LOCK();
3043#endif
3044		for (i = 0; i < io->size; i++) {
3045#ifdef __FreeBSD__
3046			PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3047			if (error) {
3048#else
3049			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3050#endif
3051				free(table, M_TEMP);
3052				free(ioe, M_TEMP);
3053				error = EFAULT;
3054				goto fail;
3055			}
3056			switch (ioe->rs_num) {
3057#ifdef ALTQ
3058			case PF_RULESET_ALTQ:
3059				if (ioe->anchor[0]) {
3060					free(table, M_TEMP);
3061					free(ioe, M_TEMP);
3062					error = EINVAL;
3063					goto fail;
3064				}
3065				if ((error = pf_begin_altq(&ioe->ticket))) {
3066					free(table, M_TEMP);
3067					free(ioe, M_TEMP);
3068					goto fail;
3069				}
3070				break;
3071#endif /* ALTQ */
3072			case PF_RULESET_TABLE:
3073				bzero(table, sizeof(*table));
3074				strlcpy(table->pfrt_anchor, ioe->anchor,
3075				    sizeof(table->pfrt_anchor));
3076				if ((error = pfr_ina_begin(table,
3077				    &ioe->ticket, NULL, 0))) {
3078					free(table, M_TEMP);
3079					free(ioe, M_TEMP);
3080					goto fail;
3081				}
3082				break;
3083			default:
3084				if ((error = pf_begin_rules(&ioe->ticket,
3085				    ioe->rs_num, ioe->anchor))) {
3086					free(table, M_TEMP);
3087					free(ioe, M_TEMP);
3088					goto fail;
3089				}
3090				break;
3091			}
3092#ifdef __FreeBSD__
3093			PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]),
3094			    error);
3095			if (error) {
3096#else
3097			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
3098#endif
3099				free(table, M_TEMP);
3100				free(ioe, M_TEMP);
3101				error = EFAULT;
3102				goto fail;
3103			}
3104		}
3105		free(table, M_TEMP);
3106		free(ioe, M_TEMP);
3107		break;
3108	}
3109
3110	case DIOCXROLLBACK: {
3111		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
3112		struct pfioc_trans_e	*ioe;
3113		struct pfr_table	*table;
3114		int			 i;
3115
3116		if (io->esize != sizeof(*ioe)) {
3117			error = ENODEV;
3118			goto fail;
3119		}
3120#ifdef __FreeBSD__
3121		PF_UNLOCK();
3122#endif
3123		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
3124		    M_TEMP, M_WAITOK);
3125		table = (struct pfr_table *)malloc(sizeof(*table),
3126		    M_TEMP, M_WAITOK);
3127#ifdef __FreeBSD__
3128		PF_LOCK();
3129#endif
3130		for (i = 0; i < io->size; i++) {
3131#ifdef __FreeBSD__
3132			PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3133			if (error) {
3134#else
3135			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3136#endif
3137				free(table, M_TEMP);
3138				free(ioe, M_TEMP);
3139				error = EFAULT;
3140				goto fail;
3141			}
3142			switch (ioe->rs_num) {
3143#ifdef ALTQ
3144			case PF_RULESET_ALTQ:
3145				if (ioe->anchor[0]) {
3146					free(table, M_TEMP);
3147					free(ioe, M_TEMP);
3148					error = EINVAL;
3149					goto fail;
3150				}
3151				if ((error = pf_rollback_altq(ioe->ticket))) {
3152					free(table, M_TEMP);
3153					free(ioe, M_TEMP);
3154					goto fail; /* really bad */
3155				}
3156				break;
3157#endif /* ALTQ */
3158			case PF_RULESET_TABLE:
3159				bzero(table, sizeof(*table));
3160				strlcpy(table->pfrt_anchor, ioe->anchor,
3161				    sizeof(table->pfrt_anchor));
3162				if ((error = pfr_ina_rollback(table,
3163				    ioe->ticket, NULL, 0))) {
3164					free(table, M_TEMP);
3165					free(ioe, M_TEMP);
3166					goto fail; /* really bad */
3167				}
3168				break;
3169			default:
3170				if ((error = pf_rollback_rules(ioe->ticket,
3171				    ioe->rs_num, ioe->anchor))) {
3172					free(table, M_TEMP);
3173					free(ioe, M_TEMP);
3174					goto fail; /* really bad */
3175				}
3176				break;
3177			}
3178		}
3179		free(table, M_TEMP);
3180		free(ioe, M_TEMP);
3181		break;
3182	}
3183
3184	case DIOCXCOMMIT: {
3185		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
3186		struct pfioc_trans_e	*ioe;
3187		struct pfr_table	*table;
3188		struct pf_ruleset	*rs;
3189		int			 i;
3190
3191		if (io->esize != sizeof(*ioe)) {
3192			error = ENODEV;
3193			goto fail;
3194		}
3195#ifdef __FreeBSD__
3196		PF_UNLOCK();
3197#endif
3198		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
3199		    M_TEMP, M_WAITOK);
3200		table = (struct pfr_table *)malloc(sizeof(*table),
3201		    M_TEMP, M_WAITOK);
3202#ifdef __FreeBSD__
3203		PF_LOCK();
3204#endif
3205		/* first pass: make sure everything will succeed */
3206		for (i = 0; i < io->size; i++) {
3207#ifdef __FreeBSD__
3208			PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3209			if (error) {
3210#else
3211			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3212#endif
3213				free(table, M_TEMP);
3214				free(ioe, M_TEMP);
3215				error = EFAULT;
3216				goto fail;
3217			}
3218			switch (ioe->rs_num) {
3219#ifdef ALTQ
3220			case PF_RULESET_ALTQ:
3221				if (ioe->anchor[0]) {
3222					free(table, M_TEMP);
3223					free(ioe, M_TEMP);
3224					error = EINVAL;
3225					goto fail;
3226				}
3227				if (!altqs_inactive_open || ioe->ticket !=
3228				    ticket_altqs_inactive) {
3229					free(table, M_TEMP);
3230					free(ioe, M_TEMP);
3231					error = EBUSY;
3232					goto fail;
3233				}
3234				break;
3235#endif /* ALTQ */
3236			case PF_RULESET_TABLE:
3237				rs = pf_find_ruleset(ioe->anchor);
3238				if (rs == NULL || !rs->topen || ioe->ticket !=
3239				     rs->tticket) {
3240					free(table, M_TEMP);
3241					free(ioe, M_TEMP);
3242					error = EBUSY;
3243					goto fail;
3244				}
3245				break;
3246			default:
3247				if (ioe->rs_num < 0 || ioe->rs_num >=
3248				    PF_RULESET_MAX) {
3249					free(table, M_TEMP);
3250					free(ioe, M_TEMP);
3251					error = EINVAL;
3252					goto fail;
3253				}
3254				rs = pf_find_ruleset(ioe->anchor);
3255				if (rs == NULL ||
3256				    !rs->rules[ioe->rs_num].inactive.open ||
3257				    rs->rules[ioe->rs_num].inactive.ticket !=
3258				    ioe->ticket) {
3259					free(table, M_TEMP);
3260					free(ioe, M_TEMP);
3261					error = EBUSY;
3262					goto fail;
3263				}
3264				break;
3265			}
3266		}
3267		/* now do the commit - no errors should happen here */
3268		for (i = 0; i < io->size; i++) {
3269#ifdef __FreeBSD__
3270			PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3271			if (error) {
3272#else
3273			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3274#endif
3275				free(table, M_TEMP);
3276				free(ioe, M_TEMP);
3277				error = EFAULT;
3278				goto fail;
3279			}
3280			switch (ioe->rs_num) {
3281#ifdef ALTQ
3282			case PF_RULESET_ALTQ:
3283				if ((error = pf_commit_altq(ioe->ticket))) {
3284					free(table, M_TEMP);
3285					free(ioe, M_TEMP);
3286					goto fail; /* really bad */
3287				}
3288				break;
3289#endif /* ALTQ */
3290			case PF_RULESET_TABLE:
3291				bzero(table, sizeof(*table));
3292				strlcpy(table->pfrt_anchor, ioe->anchor,
3293				    sizeof(table->pfrt_anchor));
3294				if ((error = pfr_ina_commit(table, ioe->ticket,
3295				    NULL, NULL, 0))) {
3296					free(table, M_TEMP);
3297					free(ioe, M_TEMP);
3298					goto fail; /* really bad */
3299				}
3300				break;
3301			default:
3302				if ((error = pf_commit_rules(ioe->ticket,
3303				    ioe->rs_num, ioe->anchor))) {
3304					free(table, M_TEMP);
3305					free(ioe, M_TEMP);
3306					goto fail; /* really bad */
3307				}
3308				break;
3309			}
3310		}
3311		free(table, M_TEMP);
3312		free(ioe, M_TEMP);
3313		break;
3314	}
3315
3316	case DIOCGETSRCNODES: {
3317		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
3318		struct pf_src_node	*n, *p, *pstore;
3319		u_int32_t		 nr = 0;
3320		int			 space = psn->psn_len;
3321
3322		if (space == 0) {
3323			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3324				nr++;
3325			psn->psn_len = sizeof(struct pf_src_node) * nr;
3326			break;
3327		}
3328
3329#ifdef __FreeBSD__
3330		PF_UNLOCK();
3331#endif
3332		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
3333#ifdef __FreeBSD__
3334		PF_LOCK();
3335#endif
3336
3337		p = psn->psn_src_nodes;
3338		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3339			int	secs = time_second, diff;
3340
3341			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3342				break;
3343
3344			bcopy(n, pstore, sizeof(*pstore));
3345			if (n->rule.ptr != NULL)
3346				pstore->rule.nr = n->rule.ptr->nr;
3347			pstore->creation = secs - pstore->creation;
3348			if (pstore->expire > secs)
3349				pstore->expire -= secs;
3350			else
3351				pstore->expire = 0;
3352
3353			/* adjust the connection rate estimate */
3354			diff = secs - n->conn_rate.last;
3355			if (diff >= n->conn_rate.seconds)
3356				pstore->conn_rate.count = 0;
3357			else
3358				pstore->conn_rate.count -=
3359				    n->conn_rate.count * diff /
3360				    n->conn_rate.seconds;
3361
3362#ifdef __FreeBSD__
3363			PF_COPYOUT(pstore, p, sizeof(*p), error);
3364#else
3365			error = copyout(pstore, p, sizeof(*p));
3366#endif
3367			if (error) {
3368				free(pstore, M_TEMP);
3369				goto fail;
3370			}
3371			p++;
3372			nr++;
3373		}
3374		psn->psn_len = sizeof(struct pf_src_node) * nr;
3375
3376		free(pstore, M_TEMP);
3377		break;
3378	}
3379
3380	case DIOCCLRSRCNODES: {
3381		struct pf_src_node	*n;
3382		struct pf_state		*state;
3383
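		/*
		 * Detach all states from their source nodes, then mark
		 * every source node expired so the purge pass below can
		 * free them.
		 */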
3384		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3385			state->src_node = NULL;
3386			state->nat_src_node = NULL;
3387		}
3388		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3389			n->expire = 1;
3390			n->states = 0;
3391		}
3392		pf_purge_expired_src_nodes(1);
3393		pf_status.src_nodes = 0;
3394		break;
3395	}
3396
3397	case DIOCKILLSRCNODES: {
3398		struct pf_src_node	*sn;
3399		struct pf_state		*s;
3400		struct pfioc_src_node_kill *psnk =
3401		    (struct pfioc_src_node_kill *)addr;
3402		int			 killed = 0;
3403
3404		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
3405			if (PF_MATCHA(psnk->psnk_src.neg,
3406			    &psnk->psnk_src.addr.v.a.addr,
3407			    &psnk->psnk_src.addr.v.a.mask,
3408			    &sn->addr, sn->af) &&
3409			    PF_MATCHA(psnk->psnk_dst.neg,
3410			    &psnk->psnk_dst.addr.v.a.addr,
3411			    &psnk->psnk_dst.addr.v.a.mask,
3412			    &sn->raddr, sn->af)) {
3413				/* Handle state to src_node linkage */
3414				if (sn->states != 0) {
3415					RB_FOREACH(s, pf_state_tree_id,
3416					    &tree_id) {
3417						if (s->src_node == sn)
3418							s->src_node = NULL;
3419						if (s->nat_src_node == sn)
3420							s->nat_src_node = NULL;
3421					}
3422					sn->states = 0;
3423				}
3424				sn->expire = 1;
3425				killed++;
3426			}
3427		}
3428
3429		if (killed > 0)
3430			pf_purge_expired_src_nodes(1);
3431
3432		psnk->psnk_af = killed;
3433		break;
3434	}
3435
3436	case DIOCSETHOSTID: {
3437		u_int32_t	*hostid = (u_int32_t *)addr;
3438
3439		if (*hostid == 0)
3440			pf_status.hostid = arc4random();
3441		else
3442			pf_status.hostid = *hostid;
3443		break;
3444	}
3445
3446	case DIOCOSFPFLUSH:
3447		pf_osfp_flush();
3448		break;
3449
3450	case DIOCIGETIFACES: {
3451		struct pfioc_iface *io = (struct pfioc_iface *)addr;
3452
3453		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3454			error = ENODEV;
3455			break;
3456		}
3457		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3458		    &io->pfiio_size);
3459		break;
3460	}
3461
3462	case DIOCSETIFFLAG: {
3463		struct pfioc_iface *io = (struct pfioc_iface *)addr;
3464
3465		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3466		break;
3467	}
3468
3469	case DIOCCLRIFFLAG: {
3470		struct pfioc_iface *io = (struct pfioc_iface *)addr;
3471
3472		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3473		break;
3474	}
3475
3476	default:
3477		error = ENODEV;
3478		break;
3479	}
3480fail:
3481#ifdef __FreeBSD__
3482	PF_UNLOCK();
3483
3484	if (flags & FWRITE)
3485		sx_xunlock(&pf_consistency_lock);
3486	else
3487		sx_sunlock(&pf_consistency_lock);
3488#else
3489	splx(s);
3490	/* XXX: Lock order? */
3491	if (flags & FWRITE)
3492		rw_exit_write(&pf_consistency_lock);
3493	else
3494		rw_exit_read(&pf_consistency_lock);
3495#endif
3496	return (error);
3497}
3498
3499#ifdef __FreeBSD__
3500/*
3501 * XXX - Check for version mismatch!!!
3502 */
3503static void
3504pf_clear_states(void)
3505{
3506	struct pf_state		*state;
3507
3508	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3509		state->timeout = PFTM_PURGE;
3510#if NPFSYNC
3511		/* don't send out individual delete messages */
3512		state->sync_flags = PFSTATE_NOSYNC;
3513#endif
3514		pf_unlink_state(state);
3515	}
3516
3517#if 0 /* NPFSYNC */
3518/*
3519 * XXX This is called on module unload; we do not want to sync that over?
3520 */
3521	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3522#endif
3523}
3524
3525static int
3526pf_clear_tables(void)
3527{
3528	struct pfioc_table io;
3529	int error;
3530
3531	bzero(&io, sizeof(io));
3532
3533	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3534	    io.pfrio_flags);
3535
3536	return (error);
3537}
3538
3539static void
3540pf_clear_srcnodes(void)
3541{
3542	struct pf_src_node	*n;
3543	struct pf_state		*state;
3544
3545	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3546		state->src_node = NULL;
3547		state->nat_src_node = NULL;
3548	}
3549	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3550		n->expire = 1;
3551		n->states = 0;
3552	}
3553}
3554/*
3555 * XXX - Check for version mismatch!!!
3556 */
3557
3558/*
3559 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3560 */
3561static int
3562shutdown_pf(void)
3563{
3564	int error = 0;
3565	u_int32_t t[5];
3566	char nn = '\0';
3567
3568	pf_status.running = 0;
3569	do {
3570		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3571		    != 0) {
3572			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3573			break;
3574		}
3575		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3576		    != 0) {
3577			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3578			break;		/* XXX: rollback? */
3579		}
3580		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3581		    != 0) {
3582			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3583			break;		/* XXX: rollback? */
3584		}
3585		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3586		    != 0) {
3587			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3588			break;		/* XXX: rollback? */
3589		}
3590		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3591		    != 0) {
3592			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3593			break;		/* XXX: rollback? */
3594		}
3595
3596		/* XXX: these should always succeed here */
3597		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3598		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3599		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3600		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3601		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3602
3603		if ((error = pf_clear_tables()) != 0)
3604			break;
3605
3606#ifdef ALTQ
3607		if ((error = pf_begin_altq(&t[0])) != 0) {
3608			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3609			break;
3610		}
3611		pf_commit_altq(t[0]);
3612#endif
3613
3614		pf_clear_states();
3615
3616		pf_clear_srcnodes();
3617
3618		/* status does not use malloc'ed memory, so no cleanup is needed */
3619		/* fingerprints and interfaces have their own cleanup code */
3620	} while (0);
3621
3622	return (error);
3623}
3624
3625static int
3626pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3627    struct inpcb *inp)
3628{
3629	/*
3630	 * XXX Wed Jul 9 22:03:16 2003 UTC
3631	 * OpenBSD changed its byte ordering convention for ip_len/ip_off
3632	 * in the network stack.  Its stack used to convert ip_len/ip_off
3633	 * to host byte order first, as FreeBSD does; that is no longer
3634	 * the case, so convert to network byte order before calling
3635	 * pf_test() and back to host byte order afterwards.
3636	 */
3637	struct ip *h = NULL;
3638	int chk;
3639
3640	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
3641		/* if m_pkthdr.len is shorter than the IP header, let pf handle it */
3642		h = mtod(*m, struct ip *);
3643		HTONS(h->ip_len);
3644		HTONS(h->ip_off);
3645	}
3646	chk = pf_test(PF_IN, ifp, m, NULL, inp);
3647	if (chk && *m) {
3648		m_freem(*m);
3649		*m = NULL;
3650	}
3651	if (*m != NULL) {
3652		/* pf_test can change ip header location */
3653		h = mtod(*m, struct ip *);
3654		NTOHS(h->ip_len);
3655		NTOHS(h->ip_off);
3656	}
3657	return chk;
3658}
3659
3660static int
3661pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3662    struct inpcb *inp)
3663{
3664	/*
3665	 * XXX Wed Jul 9 22:03:16 2003 UTC
3666	 * OpenBSD changed its byte ordering convention for ip_len/ip_off
3667	 * in the network stack.  Its stack used to convert ip_len/ip_off
3668	 * to host byte order first, as FreeBSD does; that is no longer
3669	 * the case, so convert to network byte order before calling
3670	 * pf_test() and back to host byte order afterwards.
3671	 */
3672	struct ip *h = NULL;
3673	int chk;
3674
3675	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3676	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3677		in_delayed_cksum(*m);
3678		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3679	}
3680	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
3681		/* if m_pkthdr.len is shorter than the IP header, let pf handle it */
3682		h = mtod(*m, struct ip *);
3683		HTONS(h->ip_len);
3684		HTONS(h->ip_off);
3685	}
3686	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
3687	if (chk && *m) {
3688		m_freem(*m);
3689		*m = NULL;
3690	}
3691	if (*m != NULL) {
3692		/* pf_test can change ip header location */
3693		h = mtod(*m, struct ip *);
3694		NTOHS(h->ip_len);
3695		NTOHS(h->ip_off);
3696	}
3697	return chk;
3698}
3699
3700#ifdef INET6
3701static int
3702pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3703    struct inpcb *inp)
3704{
3705
3706	/*
3707	 * IPv6 is not affected by ip_len/ip_off byte order changes.
3708	 */
3709	int chk;
3710
3711	/*
3712	 * For loopback traffic IPv6 uses the real interface in order to
3713	 * support scoped addresses.  To support stateful filtering we have
3714	 * to switch this to the loopback interface, as is done for IPv4.
3715	 */
3716	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
3717	    NULL, inp);
3718	if (chk && *m) {
3719		m_freem(*m);
3720		*m = NULL;
3721	}
3722	return chk;
3723}
3724
3725static int
3726pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3727    struct inpcb *inp)
3728{
3729	/*
3730	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3731	 */
3732	int chk;
3733
3734	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3735	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3736		in_delayed_cksum(*m);
3737		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3738	}
3739	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
3740	if (chk && *m) {
3741		m_freem(*m);
3742		*m = NULL;
3743	}
3744	return chk;
3745}
3746#endif /* INET6 */
3747
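/*
 * Register the pf_check*() handlers with pfil(9) so that inbound and
 * outbound IPv4 (and IPv6, when enabled) packets pass through
 * pf_test()/pf_test6().
 */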
3748static int
3749hook_pf(void)
3750{
3751	struct pfil_head *pfh_inet;
3752#ifdef INET6
3753	struct pfil_head *pfh_inet6;
3754#endif
3755
3756	PF_ASSERT(MA_NOTOWNED);
3757
3758	if (pf_pfil_hooked)
3759		return (0);
3760
3761	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3762	if (pfh_inet == NULL)
3763		return (ESRCH); /* XXX */
3764	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3765	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3766#ifdef INET6
3767	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3768	if (pfh_inet6 == NULL) {
3769		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3770		    pfh_inet);
3771		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3772		    pfh_inet);
3773		return (ESRCH); /* XXX */
3774	}
3775	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3776	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3777#endif
3778
3779	pf_pfil_hooked = 1;
3780	return (0);
3781}
3782
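/*
 * Remove the pfil(9) hooks installed by hook_pf().
 */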
3783static int
3784dehook_pf(void)
3785{
3786	struct pfil_head *pfh_inet;
3787#ifdef INET6
3788	struct pfil_head *pfh_inet6;
3789#endif
3790
3791	PF_ASSERT(MA_NOTOWNED);
3792
3793	if (pf_pfil_hooked == 0)
3794		return (0);
3795
3796	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3797	if (pfh_inet == NULL)
3798		return (ESRCH); /* XXX */
3799	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3800	    pfh_inet);
3801	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3802	    pfh_inet);
3803#ifdef INET6
3804	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3805	if (pfh_inet6 == NULL)
3806		return (ESRCH); /* XXX */
3807	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3808	    pfh_inet6);
3809	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3810	    pfh_inet6);
3811#endif
3812
3813	pf_pfil_hooked = 0;
3814	return (0);
3815}
3816
3817static int
3818pf_load(void)
3819{
3820	init_zone_var();
3821	init_pf_mutex();
3822	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3823	if (pfattach() < 0) {
3824		destroy_dev(pf_dev);
3825		destroy_pf_mutex();
3826		return (ENOMEM);
3827	}
3828	return (0);
3829}
3830
3831static int
3832pf_unload(void)
3833{
3834	int error = 0;
3835
3836	PF_LOCK();
3837	pf_status.running = 0;
3838	PF_UNLOCK();
3839	error = dehook_pf();
3840	if (error) {
3841		/*
3842		 * Should not happen!
3843		 * XXX Due to error code ESRCH, kldunload will show
3844		 * a message like 'No such process'.
3845		 */
3846		printf("%s: pfil unregistration failed\n", __FUNCTION__);
3847		return error;
3848	}
3849	PF_LOCK();
3850	shutdown_pf();
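	/*
	 * Ask the purge thread to exit and wait for it to acknowledge;
	 * it is expected to advance pf_end_threads past 1 before it exits.
	 */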
3851	pf_end_threads = 1;
3852	while (pf_end_threads < 2) {
3853		wakeup_one(pf_purge_thread);
3854		msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz);
3855	}
3856	pfi_cleanup();
3857	pf_osfp_flush();
3858	pf_osfp_cleanup();
3859	cleanup_pf_zone();
3860	PF_UNLOCK();
3861	destroy_dev(pf_dev);
3862	destroy_pf_mutex();
3863	return error;
3864}
3865
3866static int
3867pf_modevent(module_t mod, int type, void *data)
3868{
3869	int error = 0;
3870
3871	switch(type) {
3872	case MOD_LOAD:
3873		error = pf_load();
3874		break;
3875
3876	case MOD_UNLOAD:
3877		error = pf_unload();
3878		break;
3879	default:
3880		error = EINVAL;
3881		break;
3882	}
3883	return error;
3884}
3885
3886static moduledata_t pf_mod = {
3887	"pf",
3888	pf_modevent,
3889	0
3890};
3891
3892DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
3893MODULE_VERSION(pf, PF_MODVER);
3894#endif	/* __FreeBSD__ */
3895