pf_table.c revision 127145
1/*	$FreeBSD: head/sys/contrib/pf/net/pf_table.c 127145 2004-03-17 21:11:02Z mlaier $	*/
2/*	$OpenBSD: pf_table.c,v 1.41 2003/08/22 15:19:23 henning Exp $	*/
3
4/*
5 * Copyright (c) 2002 Cedric Berger
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 *    - Redistributions of source code must retain the above copyright
13 *      notice, this list of conditions and the following disclaimer.
14 *    - Redistributions in binary form must reproduce the above
15 *      copyright notice, this list of conditions and the following
16 *      disclaimer in the documentation and/or other materials provided
17 *      with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34#ifdef __FreeBSD__
35#include "opt_inet.h"
36#include "opt_inet6.h"
37#endif
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/socket.h>
42#include <sys/mbuf.h>
43#include <sys/kernel.h>
44#ifdef __FreeBSD__
45#include <sys/malloc.h>
46#endif
47
48#include <net/if.h>
49#include <net/route.h>
50#include <netinet/in.h>
51#ifndef __FreeBSD__
52#include <netinet/ip_ipsp.h>
53#endif
54
55#include <net/pfvar.h>
56
/*
 * Reject ioctl requests carrying flag bits outside the set a given
 * operation accepts (only bits within PFR_FLAG_ALLMASK are checked).
 * Relies on a local variable named "flags" in the calling function.
 */
#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

/* Initialize a sockaddr_in in place with the given IPv4 address. */
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

/* Initialize a sockaddr_in6 in place with the given IPv6 address. */
#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

/* Exchange two lvalues of the given type. */
#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union, by family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
        (struct pf_addr *)&(su)->sin.sin_addr :	\
        (struct pf_addr *)&(su)->sin6.sin6_addr)

/* Prefix width per family, and "is a network (not a host) address". */
#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True for the radix tree's internal root nodes, which are not entries. */
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
98
/*
 * Argument block threaded through the radix-tree walks; pfrw_op
 * selects what pfr_walktree() does with each visited entry.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,	/* clear the per-entry mark bit */
		PFRW_SWEEP,	/* queue entries whose mark bit is clear */
		PFRW_ENQUEUE,	/* queue every entry */
		PFRW_GET_ADDRS,	/* copy entries out as pfr_addr */
		PFRW_GET_ASTATS,	/* copy entries out as pfr_astats */
		PFRW_POOL_GET	/* return the pfrw_cnt'th non-negated entry */
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
	}	 pfrw_1;
	int	 pfrw_free;	/* room left / running count (aka pfrw_cnt) */
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_cnt	pfrw_free
121
122#define senderr(e)	do { rv = (e); goto _bad; } while (0)
123
124#ifdef __FreeBSD__
125uma_zone_t		 pfr_ktable_pl;
126uma_zone_t		 pfr_kentry_pl;
127#else
128struct pool		 pfr_ktable_pl;
129struct pool		 pfr_kentry_pl;
130#endif
131struct sockaddr_in	 pfr_sin;
132struct sockaddr_in6	 pfr_sin6;
133union  sockaddr_union	 pfr_mask;
134struct pf_addr		 pfr_ffaddr;
135
136void			 pfr_copyout_addr(struct pfr_addr *,
137			    struct pfr_kentry *ke);
138int			 pfr_validate_addr(struct pfr_addr *);
139void			 pfr_enqueue_addrs(struct pfr_ktable *,
140			    struct pfr_kentryworkq *, int *, int);
141void			 pfr_mark_addrs(struct pfr_ktable *);
142struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
143			    struct pfr_addr *, int);
144struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
145void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
146void			 pfr_destroy_kentry(struct pfr_kentry *);
147void			 pfr_insert_kentries(struct pfr_ktable *,
148			    struct pfr_kentryworkq *, long);
149void			 pfr_remove_kentries(struct pfr_ktable *,
150			    struct pfr_kentryworkq *);
151void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
152			    int);
153void			 pfr_reset_feedback(struct pfr_addr *, int);
154void			 pfr_prepare_network(union sockaddr_union *, int, int);
155int			 pfr_route_kentry(struct pfr_ktable *,
156			    struct pfr_kentry *);
157int			 pfr_unroute_kentry(struct pfr_ktable *,
158			    struct pfr_kentry *);
159int			 pfr_walktree(struct radix_node *, void *);
160int			 pfr_validate_table(struct pfr_table *, int);
161void			 pfr_commit_ktable(struct pfr_ktable *, long);
162void			 pfr_insert_ktables(struct pfr_ktableworkq *);
163void			 pfr_insert_ktable(struct pfr_ktable *);
164void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
165void			 pfr_setflags_ktable(struct pfr_ktable *, int);
166void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
167			    int);
168void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
169struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
170void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
171void			 pfr_destroy_ktable(struct pfr_ktable *, int);
172int			 pfr_ktable_compare(struct pfr_ktable *,
173			    struct pfr_ktable *);
174struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
175void			 pfr_clean_node_mask(struct pfr_ktable *,
176			    struct pfr_kentryworkq *);
177int			 pfr_table_count(struct pfr_table *, int);
178int			 pfr_skip_table(struct pfr_table *,
179			    struct pfr_ktable *, int);
180struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
181
182RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
183RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
184
185struct pfr_ktablehead	 pfr_ktables;
186struct pfr_table	 pfr_nulltable;
187int			 pfr_ktable_cnt;
188
/*
 * One-time module setup: create the table/entry pools (OpenBSD only;
 * on FreeBSD the UMA zones are set up elsewhere) and precompute the
 * constant sockaddr templates and all-ones address used by lookups.
 */
void
pfr_initialize(void)
{
#ifndef __FreeBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
#endif

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
206
/*
 * DIOCRCLRADDRS backend: remove every address from table tbl.
 * *ndel receives the number of entries that were (or would be)
 * removed.  With PFR_FLAG_DUMMY only the count is computed.
 * Returns 0 or an errno value.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* collect every entry of the table on workq */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		/* everything was removed, so the counter must be zero now */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
238
/*
 * DIOCRADDADDRS backend: add the "size" userland addresses at "addr"
 * to table tbl.  New entries are first routed into a scratch table
 * (tmpkt) so duplicates inside the request itself are detected before
 * the live table is touched; they are moved into kt only at commit
 * time.  *nadd receives the number of entries actually added.  With
 * PFR_FLAG_FEEDBACK a per-address result code (pfra_fback) is copied
 * back to userland; with PFR_FLAG_DUMMY nothing is committed.
 * Returns 0 or an errno value.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
#ifdef __FreeBSD__
	int			ec;
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* p: match in live table, q: match earlier in this request */
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			/* park the new entry in the scratch table for now */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
#ifdef __FreeBSD__
		if (flags & PFR_FLAG_FEEDBACK) {
			PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
			if (ec)
				senderr(EFAULT);
		}
#else
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
#endif
	}
	/* detach the queued entries from the scratch table before commit */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
338
/*
 * DIOCRDELADDRS backend: delete the "size" userland addresses at
 * "addr" from table tbl.  The per-entry mark bit is used to detect
 * duplicate deletions inside one request.  *ndel receives the number
 * of entries deleted; PFR_FLAG_FEEDBACK/PFR_FLAG_DUMMY behave as in
 * pfr_add_addrs().  Returns 0 or an errno value.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* clear all mark bits; a set mark below means "already deleted" */
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
#ifdef __FreeBSD__
		if (flags & PFR_FLAG_FEEDBACK) {
			PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
			if (ec)
				senderr(EFAULT);
		}
#else
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
#endif
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
417
/*
 * DIOCRSETADDRS backend: replace the contents of table tbl with
 * exactly the "size" addresses at "addr".  Computes three work
 * queues: addq (new entries, staged in scratch table tmpkt), delq
 * (existing entries not in the request, found via the mark bits) and
 * changeq (entries whose negation flag flips).  *nadd/*ndel/*nchange
 * receive the respective counts.  With PFR_FLAG_FEEDBACK, deleted
 * entries are additionally copied out after the request buffer
 * (up to *size2 total slots; *size2 is updated to the space needed).
 * Returns 0 or an errno value.
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
#ifdef __FreeBSD__
	int			ec;
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	/* clear mark bits; unmarked entries at the end are the deletions */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				/* listed twice in the request */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			/* stage the new entry in the scratch table */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
#ifdef __FreeBSD__
		if (flags & PFR_FLAG_FEEDBACK) {
			PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
			if (ec)
				senderr(EFAULT);
		}
#else
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
#endif
	}
	/* everything still unmarked was not in the request: delete it */
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* tell userland how much room it needs and bail */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
#ifdef __FreeBSD__
			PF_COPYOUT(&ad, addr+size+i, sizeof(ad), ec);
			if (ec)
				senderr(EFAULT);
#else
			if (copyout(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
#endif
			i++;
		}
	}
	/* detach staged entries from the scratch table before commit */
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && *size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
557
/*
 * DIOCRTSTADDRS backend: test which of the "size" host addresses at
 * "addr" match table tbl.  Network (prefix) addresses are rejected.
 * Each address gets a match/no-match code in pfra_fback; with
 * PFR_FLAG_REPLACE the matching entry is copied back over the input.
 * *nmatch receives the number of positive (non-negated) matches.
 * Returns 0 or an errno value.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		/* longest-prefix match, not exact match (third arg 0) */
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
#ifdef __FreeBSD__
		PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyout(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
#endif
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
610
/*
 * DIOCRGETADDRS backend: copy all addresses of table tbl to the
 * userland buffer at "addr".  *size is the buffer capacity on entry
 * and the table's entry count on return; if the buffer is too small,
 * only *size is updated and 0 is returned so userland can retry.
 * Returns 0 or an errno value.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size only */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (rv)
		return (rv);

	/* the walk must consume exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
657
/*
 * DIOCRGETASTATS backend: like pfr_get_addrs(), but copies out
 * pfr_astats (address plus packet/byte counters and tzero) instead of
 * bare addresses.  The PFR_FLAG_CLSTATS branch (clear stats after
 * reading) is currently unreachable since ACCEPT_FLAGS only admits
 * PFR_FLAG_ATOMIC — see the XXX note below.
 * Returns 0 or an errno value.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
#ifdef __FreeBSD__
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size only */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	/* the walk must consume exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
721
/*
 * DIOCRCLRASTATS backend: zero the statistics of the "size" listed
 * addresses in table tbl.  *nzero receives the number of entries
 * whose stats were (or would be) cleared; PFR_FLAG_FEEDBACK and
 * PFR_FLAG_DUMMY behave as in pfr_add_addrs().
 * Returns 0 or an errno value.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
#ifdef __FreeBSD__
			PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
			if (ec)
				senderr(EFAULT);
#else
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
#endif
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
787
/*
 * Sanity-check a pfr_addr received from userland.  Returns 0 if valid,
 * -1 otherwise.  Checks: known address family, prefix length within
 * the family's width, all address bits to the right of the prefix
 * zero, pfra_not strictly boolean, and pfra_fback unset.
 *
 * NOTE(review): the byte scans index from (caddr_t)ad, i.e. from the
 * start of the struct — this only addresses the address bytes if
 * pfra_u is the first member of struct pfr_addr (declared in pfvar.h,
 * not visible here).  Presumably that layout is guaranteed; confirm
 * before reordering struct pfr_addr.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	/* bits after the prefix within the boundary byte must be zero */
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	/* and every byte past the boundary byte must be zero as well */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
817
/*
 * Collect entries of table kt on workq by walking both radix trees.
 * With sweep != 0 only entries whose mark bit is clear are queued
 * (PFRW_SWEEP), otherwise every entry is (PFRW_ENQUEUE).  If naddr is
 * non-NULL it receives the number of queued entries.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
847
/*
 * Clear the mark bit of every entry in table kt (both address
 * families).  Callers then set marks selectively to detect duplicates
 * or compute the set of untouched entries.
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
#ifdef __FreeBSD__
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
#ifdef __FreeBSD__
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
868
869
/*
 * Find the entry of table kt corresponding to address ad.  Network
 * addresses use rn_lookup() with the proper netmask; host addresses
 * use rn_match() (longest-prefix match), and with exact != 0 a
 * network entry found that way is rejected so only an exact host
 * entry is returned.  Radix root nodes are filtered out.  Returns the
 * entry or NULL.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
		RADIX_NODE_HEAD_LOCK(head);
#endif
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
		RADIX_NODE_HEAD_UNLOCK(head);
#endif
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
908
/*
 * Allocate a kernel table entry from the pool and initialize it from
 * the userland pfr_addr.  Returns NULL if the pool is exhausted
 * (PR_NOWAIT: never sleeps).  The caller owns the entry until it is
 * routed into a table.
 */
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}
928
929void
930pfr_destroy_kentries(struct pfr_kentryworkq *workq)
931{
932	struct pfr_kentry	*p, *q;
933
934	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
935		q = SLIST_NEXT(p, pfrke_workq);
936		pfr_destroy_kentry(p);
937	}
938}
939
/* Return a single table entry to the pool. */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}
945
/*
 * Route every entry on workq into table kt, stamping each with the
 * creation time tzero, and bump the table's entry counter by the
 * number successfully inserted.  On the first routing failure the
 * remaining entries are left unrouted (and uncounted).
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
965
/*
 * Unroute every entry on workq from table kt, decrement the table's
 * entry counter accordingly, and free the entries.
 */
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}
980
981void
982pfr_clean_node_mask(struct pfr_ktable *kt,
983    struct pfr_kentryworkq *workq)
984{
985        struct pfr_kentry       *p;
986
987        SLIST_FOREACH(p, workq, pfrke_workq)
988                pfr_unroute_kentry(kt, p);
989}
990
/*
 * Zero the packet/byte counters of every entry on workq and reset its
 * tzero to the given time.  With negchange != 0 the entry's negation
 * flag is toggled as well (used when a "set addresses" request flips
 * an entry's polarity).  Counter access is done at splsoftnet so the
 * forwarding path does not update them mid-clear.
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
1007
/*
 * Error path helper: clear the pfra_fback field of every address in
 * the userland buffer so no stale per-address feedback survives a
 * failed request.  Copy errors abort the scan silently (the request
 * is already failing).
 */
void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	ad;
	int		i;
#ifdef __FreeBSD__
	int		ec;
#endif

	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			break;
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			break;
#endif
		ad.pfra_fback = PFR_FB_NONE;
#ifdef __FreeBSD__
		PF_COPYOUT(&ad, addr+i, sizeof(ad), ec);
		if (ec)
			break;
#else
		if (copyout(&ad, addr+i, sizeof(ad)))
			break;
#endif
	}
}
1037
1038void
1039pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1040{
1041	int	i;
1042
1043	bzero(sa, sizeof(*sa));
1044	if (af == AF_INET) {
1045		sa->sin.sin_len = sizeof(sa->sin);
1046		sa->sin.sin_family = AF_INET;
1047		sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
1048	} else {
1049		sa->sin6.sin6_len = sizeof(sa->sin6);
1050		sa->sin6.sin6_family = AF_INET6;
1051		for (i = 0; i < 4; i++) {
1052			if (net <= 32) {
1053				sa->sin6.sin6_addr.s6_addr32[i] =
1054				    htonl(-1 << (32-net));
1055				break;
1056			}
1057			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1058			net -= 32;
1059		}
1060	}
1061}
1062
/*
 * Insert entry ke into the radix tree of table kt that matches the
 * entry's address family.  Network entries get a netmask built by
 * pfr_prepare_network(); host entries are inserted without a mask.
 * The tree is protected by splsoftnet and (on newer FreeBSD) the
 * radix node head lock.  Returns 0 on success, -1 if rn_addroute()
 * rejected the entry (e.g. duplicate key).
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	RADIX_NODE_HEAD_LOCK(head);
#endif
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	RADIX_NODE_HEAD_UNLOCK(head);
#endif
	splx(s);

	return (rn == NULL ? -1 : 0);
}
1093
/*
 * Remove entry ke from the radix tree of table kt that matches the
 * entry's address family; the mirror image of pfr_route_kentry().
 * The entry itself is not freed.  Returns 0 on success, -1 if
 * rn_delete() could not find/remove the node.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	RADIX_NODE_HEAD_LOCK(head);
#endif
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	RADIX_NODE_HEAD_UNLOCK(head);
#endif
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1127
1128void
1129pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1130{
1131	bzero(ad, sizeof(*ad));
1132	if (ke == NULL)
1133		return;
1134	ad->pfra_af = ke->pfrke_af;
1135	ad->pfra_net = ke->pfrke_net;
1136	ad->pfra_not = ke->pfrke_not;
1137	if (ad->pfra_af == AF_INET)
1138		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1139	else
1140		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1141}
1142
/*
 * Radix tree walk callback; dispatches on the operation selected in
 * the pfr_walktree argument block.  Returns non-zero to stop the walk
 * (copyout failure, or search target found), 0 to continue.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s;
#ifdef __FreeBSD__
	int			ec;
#endif

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* reset the mark bit on every entry */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		/* queue only entries left unmarked by the caller */
		if (ke->pfrke_mark)
			break;
		/* fall trough */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy the entry out as a pfr_addr while room remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
#ifdef __FreeBSD__
			PF_COPYOUT(&ad, w->pfrw_addr, sizeof(ad), ec);
			if (ec)
				return (EFAULT);
#else
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
#endif
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		/* copy the entry out as pfr_astats while room remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* snapshot the counters at splsoftnet */
			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

#ifdef __FreeBSD__
			PF_COPYOUT(&as, w->pfrw_astats, sizeof(as), ec);
			if (ec)
				return (EFAULT);
#else
			if (copyout(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
#endif
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		/* count down to the requested index */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	}
	return (0);
}
1217
/*
 * Deactivate all tables matched by 'filter'.  Tables are not freed
 * directly here; removal of the ACTIVE bit is staged in pfrkt_nflags
 * and applied by pfr_setflags_ktables() (which may destroy tables
 * that lose their last flag).  With PFR_FLAG_DUMMY only *ndel is
 * computed; nothing is committed.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		/* stage removal of the ACTIVE bit */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* PFR_FLAG_ATOMIC: apply the whole batch at splsoftnet */
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1250
/*
 * Create the tables described by the userland array tbl[0..size-1].
 * Existing inactive tables are reactivated with the supplied user
 * flags; brand-new tables (and, for anchored tables, a root table in
 * the main ruleset) are created.  *nadd returns the number of tables
 * added or reactivated.  With PFR_FLAG_DUMMY nothing is committed.
 * On error, senderr() jumps to _bad and tables created so far are
 * destroyed.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
#ifdef __FreeBSD__
	int			ec;
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
#endif
		/* only user-settable flags may be supplied */
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet - create it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* maybe the root is already queued for insertion */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* table exists but is inactive - reactivate it */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	/* senderr() jumps here; undo the tables created so far */
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1344
/*
 * Deactivate the tables named in the userland array tbl[0..size-1].
 * Matching active tables get their ACTIVE flag staged for removal
 * (which may destroy them in pfr_setflags_ktable()).  *ndel returns
 * the number of tables affected.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
#endif
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1392
/*
 * Copy the definitions of all tables matching 'filter' out to the
 * userland array tbl.  On entry *size is the array capacity; if it is
 * too small, the required size is returned in *size and nothing is
 * copied out (caller retries with a bigger buffer).
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		/* pfrkt_t is the struct pfr_table embedded at the head of p */
#ifdef __FreeBSD__
		PF_COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
#endif
	}
	if (n) {
		/* the count changed while walking - should not happen */
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1432
/*
 * Copy per-table statistics for all tables matching 'filter' to the
 * userland array tbl; same sizing protocol as pfr_get_tables().
 * With PFR_FLAG_ATOMIC splsoftnet is held across the whole walk
 * (copyout included); otherwise it is raised only around each
 * individual copyout.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
#ifdef __FreeBSD__
	int			ec;
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
#ifdef __FreeBSD__
		PF_COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), ec);
		if (ec) {
			splx(s);
			return (EFAULT);
		}
#else
		if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
#endif
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	/* dead branch: PFR_FLAG_CLSTATS is rejected by ACCEPT_FLAGS above */
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1497
/*
 * Zero the statistics of the tables named in tbl[0..size-1].  With
 * PFR_FLAG_ADDRSTOO the per-address counters are cleared as well.
 * *nzero returns the number of tables found.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
#ifdef __FreeBSD__
	int			ec;
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
#endif
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1544
/*
 * Set and clear user flags on the tables named in tbl[0..size-1].
 * setflag and clrflag must be disjoint subsets of PFR_TFLAG_USRMASK.
 * *nchange counts tables whose flags changed; *ndel counts tables
 * whose loss of PERSIST (with no remaining reference) will destroy
 * them when the staged flags are applied.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec);
		if (ec)
			return (EFAULT);
#else
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
#endif
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;	/* no effective change */
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST on an unreferenced table kills it */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1606
/*
 * Start a table transaction for the given anchor/ruleset: flush any
 * leftover INACTIVE tables from a previous (aborted) transaction,
 * mark the ruleset open and hand out a fresh ticket.  *ndel returns
 * the number of stale inactive tables flushed.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		/* stale inactive table: stage removal of the INACTIVE bit */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1639
/*
 * Load the inactive (shadow) copy of one table as part of an open
 * transaction: create the table skeleton (and, for anchored tables,
 * a root table) if needed, then build a shadow ktable holding the
 * supplied addresses.  The shadow replaces any previous one and is
 * swapped into place later by pfr_ina_commit().  *nadd counts new
 * tables, *naddr the addresses loaded into the shadow.  On error,
 * senderr() jumps to _bad and everything built here is rolled back.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;
#ifdef __FreeBSD__
	int			ec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK))
		return (EINVAL);
	/* the transaction must be open and the ticket current */
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the shadow table holding the new address set */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
#ifdef __FreeBSD__
		PF_COPYIN(addr+i, &ad, sizeof(ad), ec);
		if (ec)
			senderr(EFAULT);
#else
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
#endif
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;	/* duplicate within this request */
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any shadow left by an earlier DEFINE call */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	/* senderr() jumps here; roll back everything built above */
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1747
/*
 * Commit an open table transaction: swap every INACTIVE shadow table
 * of the anchor/ruleset into place (pfr_commit_ktable()) and close
 * the transaction.  *nadd counts newly activated tables, *nchange
 * tables whose contents were replaced.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
#ifdef __FreeBSD__
	/*
	 * XXX Is it OK under LP64 environments?
	 */
	long			 tzero = (long)time_second;
#else
	long			 tzero = time.tv_sec;
#endif

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	/* the transaction must be open and the ticket current */
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1799
/*
 * Swap one table's shadow into place.  Three cases:
 *  - shadow carries NO_ADDRESSES: only the flags change; stats are
 *    reset if the table was not active before;
 *  - target is active: merge address sets entry by entry (add new,
 *    delete vanished, record negation toggles) so lookups on the
 *    live table stay coherent throughout;
 *  - target is inactive: simply swap the radix trees wholesale.
 * Finally the shadow is destroyed and the table's flags recomputed.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* mark/sweep: kt entries not re-marked below get deleted */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* present in both: keep kt's entry */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* new address: move the shadow entry to kt */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* user flags come from the shadow, set flags from kt itself */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1858
1859int
1860pfr_validate_table(struct pfr_table *tbl, int allowedflags)
1861{
1862	int i;
1863
1864	if (!tbl->pfrt_name[0])
1865		return (-1);
1866	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1867		return (-1);
1868	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1869		if (tbl->pfrt_name[i])
1870			return (-1);
1871	if (tbl->pfrt_flags & ~allowedflags)
1872		return (-1);
1873	return (0);
1874}
1875
1876int
1877pfr_table_count(struct pfr_table *filter, int flags)
1878{
1879	struct pf_ruleset *rs;
1880	struct pf_anchor *ac;
1881
1882	if (flags & PFR_FLAG_ALLRSETS)
1883		return (pfr_ktable_cnt);
1884	if (filter->pfrt_ruleset[0]) {
1885		rs = pf_find_ruleset(filter->pfrt_anchor,
1886		    filter->pfrt_ruleset);
1887		return ((rs != NULL) ? rs->tables : -1);
1888	}
1889	if (filter->pfrt_anchor[0]) {
1890		ac = pf_find_anchor(filter->pfrt_anchor);
1891		return ((ac != NULL) ? ac->tables : -1);
1892	}
1893	return (pf_main_ruleset.tables);
1894}
1895
1896int
1897pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1898{
1899	if (flags & PFR_FLAG_ALLRSETS)
1900		return (0);
1901	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
1902	    PF_ANCHOR_NAME_SIZE))
1903		return (1);
1904	if (!filter->pfrt_ruleset[0])
1905		return (0);
1906	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
1907	    PF_RULESET_NAME_SIZE))
1908		return (1);
1909	return (0);
1910}
1911
1912void
1913pfr_insert_ktables(struct pfr_ktableworkq *workq)
1914{
1915	struct pfr_ktable	*p;
1916
1917	SLIST_FOREACH(p, workq, pfrkt_workq)
1918		pfr_insert_ktable(p);
1919}
1920
1921void
1922pfr_insert_ktable(struct pfr_ktable *kt)
1923{
1924	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1925	pfr_ktable_cnt++;
1926	if (kt->pfrkt_root != NULL)
1927		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1928			pfr_setflags_ktable(kt->pfrkt_root,
1929			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1930}
1931
1932void
1933pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1934{
1935	struct pfr_ktable	*p;
1936
1937	SLIST_FOREACH(p, workq, pfrkt_workq)
1938		pfr_setflags_ktable(p, p->pfrkt_nflags);
1939}
1940
/*
 * Apply a new flag set to a table.  A table that is neither
 * referenced nor persistent loses ACTIVE; a non-active table loses
 * its user flags.  Once no SETMASK flag remains the table is removed
 * from the tree and destroyed (recursively dropping the root table's
 * anchor reference).  Otherwise, dropping ACTIVE flushes the address
 * set and dropping INACTIVE discards a pending shadow.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* nothing keeps the table alive - destroy it */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1972
1973void
1974pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1975{
1976	struct pfr_ktable	*p;
1977
1978	SLIST_FOREACH(p, workq, pfrkt_workq)
1979		pfr_clstats_ktable(p, tzero, recurse);
1980}
1981
/*
 * Reset a table's statistics and set its zero-time; with 'recurse'
 * the per-address counters are cleared too.  The counters are zeroed
 * at splsoftnet so the packet path never sees a half-cleared set.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
1999
/*
 * Allocate and initialize a ktable for 'tbl'.  With 'attachruleset'
 * the table is accounted against its (possibly freshly created)
 * ruleset and anchor.  Returns NULL on allocation failure; partial
 * state is rolled back through pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	/* one radix tree per family; the argument is the bit offset of
	 * the address within the corresponding sockaddr */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
2036
2037void
2038pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2039{
2040	struct pfr_ktable	*p, *q;
2041
2042	for (p = SLIST_FIRST(workq); p; p = q) {
2043		q = SLIST_NEXT(p, pfrkt_workq);
2044		pfr_destroy_ktable(p, flushaddr);
2045	}
2046}
2047
/*
 * Free a ktable and everything attached to it: optionally its address
 * entries, the two radix heads, any pending shadow table, and finally
 * the ruleset accounting taken in pfr_create_ktable().
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	/* newer FreeBSD radix heads carry a lock that must be torn down */
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	if (kt->pfrkt_ip4 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	}
	if (kt->pfrkt_ip6 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	}
#else
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
#endif
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
2083
2084int
2085pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2086{
2087	int d;
2088
2089	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2090		return (d);
2091	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
2092	    PF_ANCHOR_NAME_SIZE)))
2093		return (d);
2094	return strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
2095	    PF_RULESET_NAME_SIZE);
2096}
2097
2098struct pfr_ktable *
2099pfr_lookup_table(struct pfr_table *tbl)
2100{
2101	/* struct pfr_ktable start like a struct pfr_table */
2102	return RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
2103}
2104
/*
 * Packet-path lookup: does address 'a' match table 'kt'?  Uses the
 * file-global pfr_sin/pfr_sin6 scratch sockaddrs (callers are
 * presumably serialized at splsoftnet - NOTE(review): verify).
 * Updates the table's match/nomatch counters.  Returns 1 on a match
 * against a non-negated entry, 0 otherwise.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	/* an inactive attached table defers to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return 0;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2137
/*
 * Packet-path statistics update: account 'len' bytes to the table
 * and to the matching entry.  'notrule' says whether the rule that
 * used this table matched negated; if the lookup disagrees with that
 * the traffic is booked under PFR_OP_XPASS and the per-entry
 * counters are skipped.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	/* an inactive attached table defers to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2175
/*
 * Look up (or create) the table 'name' for use by a rule in ruleset
 * 'rs', and take a rule reference on it.  Rules in an anchor get a
 * table inside their anchor/ruleset plus a root table in the main
 * ruleset.  Returns NULL if the table could not be created.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
#ifdef __FreeBSD__
		/*
		 * XXX Is it OK under LP64 environments?
		 */
		kt = pfr_create_ktable(&tbl, (long)time_second, 1);
#else
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
#endif
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* find or create the root table (main ruleset) */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* the first rule reference marks the table REFERENCED */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return kt;
}
2221
2222void
2223pfr_detach_table(struct pfr_ktable *kt)
2224{
2225	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2226		printf("pfr_detach_table: refcount = %d.\n",
2227		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2228	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2229		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2230}
2231
2232
/*
 * Select an address from table 'kt' for round-robin address pools.
 * *pidx is the current block index; 'counter' the last address handed
 * out within that block.  On success 0 is returned, (*raddr,*rmask)
 * describe the chosen block and counter/pidx are advanced; 1 means
 * the table is exhausted, -1 that it is unusable.  Nested (more
 * specific) blocks are skipped by re-probing the radix tree.
 */
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	/* addr aliases the global scratch sockaddr probed by rn_match below */
	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	/* an inactive attached table defers to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
                ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match(&pfr_sin, kt->pfrkt_ip4) :
		    rn_match(&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup return the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		/*
		 * NOTE(review): AF_INET is passed here even when af is
		 * AF_INET6, so the mask for a nested IPv6 block is
		 * prepared as if it were IPv4.  This looks wrong for
		 * the v6 case - verify against later upstream
		 * pf_table.c revisions before changing.
		 */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
2310
2311struct pfr_kentry *
2312pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2313{
2314	struct pfr_walktree	w;
2315
2316        bzero(&w, sizeof(w));
2317        w.pfrw_op = PFRW_POOL_GET;
2318        w.pfrw_cnt = idx;
2319
2320	switch(af) {
2321	case AF_INET:
2322#ifdef __FreeBSD__
2323		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2324#else
2325		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2326#endif
2327		return w.pfrw_kentry;
2328	case AF_INET6:
2329#ifdef __FreeBSD__
2330		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2331#else
2332		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2333#endif
2334		return w.pfrw_kentry;
2335	default:
2336		return NULL;
2337	}
2338}
2339