1/*	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $	*/
2
3/*
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 *    - Redistributions of source code must retain the above copyright
12 *      notice, this list of conditions and the following disclaimer.
13 *    - Redistributions in binary form must reproduce the above
14 *      copyright notice, this list of conditions and the following
15 *      disclaimer in the documentation and/or other materials provided
16 *      with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33#ifdef __FreeBSD__
34#include "opt_inet.h"
35#include "opt_inet6.h"
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD$");
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/socket.h>
44#include <sys/mbuf.h>
45#include <sys/kernel.h>
46#ifdef __FreeBSD__
47#include <sys/malloc.h>
48#else
49#include <sys/pool.h>
50#endif
51
52#include <net/if.h>
53#include <net/route.h>
54#include <netinet/in.h>
55#ifndef __FreeBSD__
56#include <netinet/ip_ipsp.h>
57#endif
58#include <net/pfvar.h>
59
/*
 * Validate ioctl flags on entry: fail with EINVAL if any flag outside
 * of 'oklist' (restricted to the PFR_FLAG_ALLMASK bits) is set.
 */
#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)
66
#ifdef __FreeBSD__
/*
 * Copy in from userland with the pf lock temporarily dropped:
 * copyin() may fault/sleep, which must not happen under PF_LOCK.
 */
static inline int
_copyin(const void *uaddr, void *kaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	r = copyin(uaddr, kaddr, len);
	PF_LOCK();

	return (r);
}

/* Counterpart of _copyin() for copying out to userland. */
static inline int
_copyout(const void *uaddr, void *kaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	r = copyout(uaddr, kaddr, len);
	PF_LOCK();

	return (r);
}

/*
 * COPYIN/COPYOUT choose between a real userland copy (ioctl path,
 * PFR_FLAG_USERIOCTL set, address may fault) and a plain bcopy()
 * for kernel-internal callers; the bcopy() form always "succeeds".
 */
#define	COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	_copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	_copyout((from), (to), (size)) :	\
	(bcopy((from), (to), (size)), 0))

#else
/* OpenBSD: copyin/copyout can be called directly (no pf lock dance). */
#define	COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
#endif
113
/* Initialize a sockaddr_in with the given IPv4 address. */
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

/* Initialize a sockaddr_in6 with the given IPv6 address. */
#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

/* Exchange two values of the given type. */
#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union, by family. */
#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

/* Address width in bits for an address family. */
#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
/* True if the prefix length denotes a network, not a single host. */
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True if the radix node is one of the tree's internal root nodes. */
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
148
/*
 * Argument block passed through the radix tree walkers to
 * pfr_walktree(); pfrw_op selects the action applied to each node,
 * and the union member used depends on that op.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	/* remaining output slots, or (via pfrw_cnt alias) a counter */
	int	 pfrw_free;
	int	 pfrw_flags;
};
/* Convenience accessors for the union/counter fields above. */
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

/* Record an error code and jump to the function's unwind label. */
#define	senderr(e)	do { rv = (e); goto _bad; } while (0)
177
#ifdef __FreeBSD__
/*
 * Per-VNET allocation zones and prebuilt sockaddr templates.
 * The sin/sin6/ffaddr templates are filled in by pfr_initialize().
 */
VNET_DEFINE(uma_zone_t,			pfr_ktable_pl);
VNET_DEFINE(uma_zone_t,			pfr_kentry_pl);
VNET_DEFINE(uma_zone_t,			pfr_kcounters_pl);
VNET_DEFINE(struct sockaddr_in,		pfr_sin);
#define	V_pfr_sin			VNET(pfr_sin)
VNET_DEFINE(struct sockaddr_in6,	pfr_sin6);
#define	V_pfr_sin6			VNET(pfr_sin6)
VNET_DEFINE(union sockaddr_union,	pfr_mask);
#define	V_pfr_mask			VNET(pfr_mask)
VNET_DEFINE(struct pf_addr,		pfr_ffaddr);
#define	V_pfr_ffaddr			VNET(pfr_ffaddr)
#else
/* OpenBSD: plain global pools and templates, set up in pfr_initialize(). */
struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kcounters_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;
#endif
199
/* Internal helper prototypes; definitions follow below. */
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

/* Red-black tree of all tables, ordered by pfr_ktable_compare(). */
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;	/* all known tables */
struct pfr_table	 pfr_nulltable;	/* template for scratch tables */
int			 pfr_ktable_cnt;
253
/*
 * One-time module initialization.  On OpenBSD the ktable/kentry/
 * counter pools are created here; on FreeBSD the corresponding UMA
 * zones are presumably set up elsewhere (not visible in this file).
 * Both variants prefill the shared sockaddr templates and the
 * all-ones address used for full-width masks.
 */
void
pfr_initialize(void)
{
#ifndef __FreeBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters), 0, 0, 0,
	    "pfrkcounters", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
#else
	V_pfr_sin.sin_len = sizeof(V_pfr_sin);
	V_pfr_sin.sin_family = AF_INET;
	V_pfr_sin6.sin6_len = sizeof(V_pfr_sin6);
	V_pfr_sin6.sin6_family = AF_INET6;

	memset(&V_pfr_ffaddr, 0xff, sizeof(V_pfr_ffaddr));
#endif
}
280
/*
 * Remove all addresses from table 'tbl'; the number of removed
 * entries is returned through 'ndel'.  With PFR_FLAG_DUMMY nothing
 * is modified; with PFR_FLAG_ATOMIC the removal runs at splsoftnet().
 * Returns EINVAL/ESRCH/EPERM on validation failure, 0 on success.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* collect every entry of the table into workq */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		/* after a full flush the count must be zero */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
312
/*
 * Add the 'size' addresses in 'addr' to table 'tbl'; *nadd receives
 * the number actually added.  A throwaway table (tmpkt) shadows the
 * input so duplicates within the same request are detected without
 * touching the live table.  With PFR_FLAG_FEEDBACK a per-address
 * result code is copied back to userland; with PFR_FLAG_DUMMY the
 * new entries are built but discarded instead of committed.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* scratch table for intra-request duplicate detection */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);	/* in live table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* seen in input? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			/* track the new entry in the scratch table too */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* detach entries from the scratch table before committing */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
393
/*
 * Delete the 'size' addresses in 'addr' from table 'tbl'; *ndel
 * receives the number actually deleted.  Entries are first marked,
 * then collected into a work queue and removed in one pass.  With
 * PFR_FLAG_FEEDBACK a per-address result code is copied back; with
 * PFR_FLAG_DUMMY nothing is removed.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	/* integer approximation of log2(N), used by the heuristic */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		/* queue each matching, not-yet-marked entry exactly once */
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
483
484int
485pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
486    int *size2, int *nadd, int *ndel, int *nchange, int flags,
487    u_int32_t ignore_pfrt_flags)
488{
489	struct pfr_ktable	*kt, *tmpkt;
490	struct pfr_kentryworkq	 addq, delq, changeq;
491	struct pfr_kentry	*p, *q;
492	struct pfr_addr		 ad;
493	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
494	long			 tzero = time_second;
495
496	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
497	    PFR_FLAG_FEEDBACK);
498	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
499	    PFR_FLAG_USERIOCTL))
500		return (EINVAL);
501	kt = pfr_lookup_table(tbl);
502	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
503		return (ESRCH);
504	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
505		return (EPERM);
506	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
507	    !(flags & PFR_FLAG_USERIOCTL));
508	if (tmpkt == NULL)
509		return (ENOMEM);
510	pfr_mark_addrs(kt);
511	SLIST_INIT(&addq);
512	SLIST_INIT(&delq);
513	SLIST_INIT(&changeq);
514	for (i = 0; i < size; i++) {
515		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
516			senderr(EFAULT);
517		if (pfr_validate_addr(&ad))
518			senderr(EINVAL);
519		ad.pfra_fback = PFR_FB_NONE;
520		p = pfr_lookup_addr(kt, &ad, 1);
521		if (p != NULL) {
522			if (p->pfrke_mark) {
523				ad.pfra_fback = PFR_FB_DUPLICATE;
524				goto _skip;
525			}
526			p->pfrke_mark = 1;
527			if (p->pfrke_not != ad.pfra_not) {
528				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
529				ad.pfra_fback = PFR_FB_CHANGED;
530				xchange++;
531			}
532		} else {
533			q = pfr_lookup_addr(tmpkt, &ad, 1);
534			if (q != NULL) {
535				ad.pfra_fback = PFR_FB_DUPLICATE;
536				goto _skip;
537			}
538			p = pfr_create_kentry(&ad,
539			    !(flags & PFR_FLAG_USERIOCTL));
540			if (p == NULL)
541				senderr(ENOMEM);
542			if (pfr_route_kentry(tmpkt, p)) {
543				pfr_destroy_kentry(p);
544				ad.pfra_fback = PFR_FB_NONE;
545			} else {
546				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
547				ad.pfra_fback = PFR_FB_ADDED;
548				xadd++;
549			}
550		}
551_skip:
552		if (flags & PFR_FLAG_FEEDBACK)
553			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
554				senderr(EFAULT);
555	}
556	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
557	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
558		if (*size2 < size+xdel) {
559			*size2 = size+xdel;
560			senderr(0);
561		}
562		i = 0;
563		SLIST_FOREACH(p, &delq, pfrke_workq) {
564			pfr_copyout_addr(&ad, p);
565			ad.pfra_fback = PFR_FB_DELETED;
566			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
567				senderr(EFAULT);
568			i++;
569		}
570	}
571	pfr_clean_node_mask(tmpkt, &addq);
572	if (!(flags & PFR_FLAG_DUMMY)) {
573		if (flags & PFR_FLAG_ATOMIC)
574			s = splsoftnet();
575		pfr_insert_kentries(kt, &addq, tzero);
576		pfr_remove_kentries(kt, &delq);
577		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
578		if (flags & PFR_FLAG_ATOMIC)
579			splx(s);
580	} else
581		pfr_destroy_kentries(&addq);
582	if (nadd != NULL)
583		*nadd = xadd;
584	if (ndel != NULL)
585		*ndel = xdel;
586	if (nchange != NULL)
587		*nchange = xchange;
588	if ((flags & PFR_FLAG_FEEDBACK) && size2)
589		*size2 = size+xdel;
590	pfr_destroy_ktable(tmpkt, 0);
591	return (0);
592_bad:
593	pfr_clean_node_mask(tmpkt, &addq);
594	pfr_destroy_kentries(&addq);
595	if (flags & PFR_FLAG_FEEDBACK)
596		pfr_reset_feedback(addr, size, flags);
597	pfr_destroy_ktable(tmpkt, 0);
598	return (rv);
599}
600
/*
 * Test whether each of the 'size' addresses in 'addr' matches table
 * 'tbl'; *nmatch receives the number of (non-negated) matches.  The
 * feedback field is copied back unconditionally (no FEEDBACK flag
 * required), and with PFR_FLAG_REPLACE each input address is
 * overwritten with the matching table entry.  Network (prefix)
 * addresses are rejected as input.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		/* longest-prefix match (exact=0) */
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
638
/*
 * Copy all addresses of table 'tbl' out to 'addr'.  If the caller's
 * buffer (*size) is too small, only the required size is reported
 * back and nothing is copied.  On success *size is set to the number
 * of entries written.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	/* walk the IPv4 tree, then the IPv6 tree */
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (rv)
		return (rv);

	/* all output slots should have been consumed exactly */
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
686
/*
 * Like pfr_get_addrs(), but copies out address statistics
 * (struct pfr_astats) instead of bare addresses.  Note that
 * ACCEPT_FLAGS only admits PFR_FLAG_ATOMIC here, so the
 * PFR_FLAG_CLSTATS branch below is currently unreachable
 * (see the XXX comment).
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
745
/*
 * Clear the statistics of the 'size' addresses in 'addr' within
 * table 'tbl'; *nzero receives the number of entries cleared.  With
 * PFR_FLAG_FEEDBACK a result code is copied back per address; with
 * PFR_FLAG_DUMMY nothing is modified.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		/* tzero=0: statistics reset without a new creation time */
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
797
/*
 * Sanity-check a user-supplied address: known address family, prefix
 * length within the family's bounds, no host bits set beyond the
 * prefix, zero bytes in the unused tail of the address union, and
 * well-formed negation/feedback fields.  Returns 0 if valid, -1
 * otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * NOTE(review): the byte scans below index from the start of
	 * struct pfr_addr, which assumes the address union (pfra_u)
	 * is the struct's first member -- confirm against pfvar.h.
	 */
	/* host bits within the last prefix byte must be zero */
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	/* every byte past the prefix must be zero */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
831
/*
 * Collect table entries into 'workq' by walking both radix trees.
 * With 'sweep' set, the walk uses PFRW_SWEEP (operates relative to
 * the mark bit); otherwise PFRW_ENQUEUE collects entries.  The
 * number of enqueued entries is returned through 'naddr' if non-NULL.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
861
/*
 * Walk both radix trees of 'kt' with the PFRW_MARK op (used to set
 * up mark-and-sweep deletion in pfr_del_addrs/pfr_set_addrs).
 * NOTE(review): unlike pfr_enqueue_addrs(), the tree heads are not
 * NULL-checked here -- presumably both are always allocated for an
 * active table; confirm against pfr_create_ktable().
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
#ifdef __FreeBSD__
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
#ifdef __FreeBSD__
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
882
883
/*
 * Find the table entry for 'ad' in 'kt'.  Network addresses are
 * looked up exactly (address + prefix mask via rn_lookup); host
 * addresses use longest-prefix rn_match(), and with 'exact' set a
 * network entry that merely contains the host is rejected.  Radix
 * root nodes are never returned.  Callers are expected to have
 * validated pfra_af; for any other family 'head' would be NULL
 * (FreeBSD) or uninitialized (OpenBSD).
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
#ifdef __FreeBSD__
	struct radix_node_head	*head = NULL;
#else
	struct radix_node_head	*head;
#endif
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
#ifdef __FreeBSD__
		PF_LOCK_ASSERT();
#endif
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		/* a network entry is not an exact match for a host */
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
923
/*
 * Allocate and fill a new table entry from 'ad'.  On OpenBSD, 'intr'
 * selects a non-sleeping allocation (interrupt context) versus a
 * waiting one; the FreeBSD path always allocates PR_NOWAIT and
 * ignores 'intr'.  Returns NULL on allocation failure.
 */
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

#ifdef __FreeBSD__
	ke =  pool_get(&V_pfr_kentry_pl, PR_NOWAIT | PR_ZERO);
#else
	if (intr)
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT | PR_ZERO);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
#endif
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}
949
950void
951pfr_destroy_kentries(struct pfr_kentryworkq *workq)
952{
953	struct pfr_kentry	*p, *q;
954
955	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
956		q = SLIST_NEXT(p, pfrke_workq);
957		pfr_destroy_kentry(p);
958	}
959}
960
/*
 * Free a single table entry and, if present, its statistics counter
 * block.  Note the layout: the if() guards only the counters
 * pool_put() inside each #ifdef branch; the entry itself is always
 * returned to the kentry pool.
 */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
#ifdef __FreeBSD__
		pool_put(&V_pfr_kcounters_pl, ke->pfrke_counters);
	pool_put(&V_pfr_kentry_pl, ke);
#else
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	pool_put(&pfr_kentry_pl, ke);
#endif
}
973
974void
975pfr_insert_kentries(struct pfr_ktable *kt,
976    struct pfr_kentryworkq *workq, long tzero)
977{
978	struct pfr_kentry	*p;
979	int			 rv, n = 0;
980
981	SLIST_FOREACH(p, workq, pfrke_workq) {
982		rv = pfr_route_kentry(kt, p);
983		if (rv) {
984			printf("pfr_insert_kentries: cannot route entry "
985			    "(code=%d).\n", rv);
986			break;
987		}
988		p->pfrke_tzero = tzero;
989		n++;
990	}
991	kt->pfrkt_cnt += n;
992}
993
994int
995pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
996{
997	struct pfr_kentry	*p;
998	int			 rv;
999
1000	p = pfr_lookup_addr(kt, ad, 1);
1001	if (p != NULL)
1002		return (0);
1003	p = pfr_create_kentry(ad, 1);
1004	if (p == NULL)
1005		return (EINVAL);
1006
1007	rv = pfr_route_kentry(kt, p);
1008	if (rv)
1009		return (rv);
1010
1011	p->pfrke_tzero = tzero;
1012	kt->pfrkt_cnt++;
1013
1014	return (0);
1015}
1016
1017void
1018pfr_remove_kentries(struct pfr_ktable *kt,
1019    struct pfr_kentryworkq *workq)
1020{
1021	struct pfr_kentry	*p;
1022	int			 n = 0;
1023
1024	SLIST_FOREACH(p, workq, pfrke_workq) {
1025		pfr_unroute_kentry(kt, p);
1026		n++;
1027	}
1028	kt->pfrkt_cnt -= n;
1029	pfr_destroy_kentries(workq);
1030}
1031
1032void
1033pfr_clean_node_mask(struct pfr_ktable *kt,
1034    struct pfr_kentryworkq *workq)
1035{
1036	struct pfr_kentry	*p;
1037
1038	SLIST_FOREACH(p, workq, pfrke_workq)
1039		pfr_unroute_kentry(kt, p);
1040}
1041
/*
 * Reset statistics on every entry in 'workq': release the counter
 * block back to its pool and restamp pfrke_tzero with 'tzero'.  With
 * 'negchange' set, additionally toggle each entry's negation flag
 * (used by pfr_set_addrs() for changed entries).  Each entry is
 * handled inside its own splsoftnet() critical section.
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
#ifdef __FreeBSD__
			pool_put(&V_pfr_kcounters_pl, p->pfrke_counters);
#else
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
#endif
			p->pfrke_counters = NULL;
		}
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
1064
1065void
1066pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
1067{
1068	struct pfr_addr	ad;
1069	int		i;
1070
1071	for (i = 0; i < size; i++) {
1072		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1073			break;
1074		ad.pfra_fback = PFR_FB_NONE;
1075		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
1076			break;
1077	}
1078}
1079
1080void
1081pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1082{
1083	int	i;
1084
1085	bzero(sa, sizeof(*sa));
1086	if (af == AF_INET) {
1087		sa->sin.sin_len = sizeof(sa->sin);
1088		sa->sin.sin_family = AF_INET;
1089		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
1090	} else if (af == AF_INET6) {
1091		sa->sin6.sin6_len = sizeof(sa->sin6);
1092		sa->sin6.sin6_family = AF_INET6;
1093		for (i = 0; i < 4; i++) {
1094			if (net <= 32) {
1095				sa->sin6.sin6_addr.s6_addr32[i] =
1096				    net ? htonl(-1 << (32-net)) : 0;
1097				break;
1098			}
1099			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1100			net -= 32;
1101		}
1102	}
1103}
1104
/*
 * Insert 'ke' into the radix tree matching its address family.
 * Network entries are added with an explicit prefix mask, host
 * entries without one.  Runs at splsoftnet() (with the pf lock held
 * on FreeBSD).  Returns 0 on success, -1 if rn_addroute() returned
 * NULL.
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
#ifdef __FreeBSD__
	struct radix_node_head	*head = NULL;
#else
	struct radix_node_head	*head;
#endif
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#endif
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
#ifdef __FreeBSD__
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
#else
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
#endif
	} else
#ifdef __FreeBSD__
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
#else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
#endif
	splx(s);

	return (rn == NULL ? -1 : 0);
}
1144
/*
 * Remove kernel table entry 'ke' from the radix tree of 'kt' matching
 * its address family; the mirror operation of pfr_route_kentry().
 * Returns 0 on success, -1 (with a diagnostic) if the entry was not
 * found in the tree.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
#ifdef __FreeBSD__
	struct radix_node_head	*head = NULL;
#else
	struct radix_node_head	*head;
#endif
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	/* the trees are also consulted from the softnet packet path */
	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#endif
	if (KENTRY_NETWORK(ke)) {
		/* network entry: delete with the same mask it was added with */
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
#ifdef __FreeBSD__
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
#else
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
#endif
	} else
#ifdef __FreeBSD__
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
#else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
#endif
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1187
1188void
1189pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1190{
1191	bzero(ad, sizeof(*ad));
1192	if (ke == NULL)
1193		return;
1194	ad->pfra_af = ke->pfrke_af;
1195	ad->pfra_net = ke->pfrke_net;
1196	ad->pfra_not = ke->pfrke_not;
1197	if (ad->pfra_af == AF_INET)
1198		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1199	else if (ad->pfra_af == AF_INET6)
1200		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1201}
1202
1203int
1204pfr_walktree(struct radix_node *rn, void *arg)
1205{
1206	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1207	struct pfr_walktree	*w = arg;
1208	int			 s, flags = w->pfrw_flags;
1209
1210	switch (w->pfrw_op) {
1211	case PFRW_MARK:
1212		ke->pfrke_mark = 0;
1213		break;
1214	case PFRW_SWEEP:
1215		if (ke->pfrke_mark)
1216			break;
1217		/* FALLTHROUGH */
1218	case PFRW_ENQUEUE:
1219		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1220		w->pfrw_cnt++;
1221		break;
1222	case PFRW_GET_ADDRS:
1223		if (w->pfrw_free-- > 0) {
1224			struct pfr_addr ad;
1225
1226			pfr_copyout_addr(&ad, ke);
1227			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1228				return (EFAULT);
1229			w->pfrw_addr++;
1230		}
1231		break;
1232	case PFRW_GET_ASTATS:
1233		if (w->pfrw_free-- > 0) {
1234			struct pfr_astats as;
1235
1236			pfr_copyout_addr(&as.pfras_a, ke);
1237
1238			s = splsoftnet();
1239			if (ke->pfrke_counters) {
1240				bcopy(ke->pfrke_counters->pfrkc_packets,
1241				    as.pfras_packets, sizeof(as.pfras_packets));
1242				bcopy(ke->pfrke_counters->pfrkc_bytes,
1243				    as.pfras_bytes, sizeof(as.pfras_bytes));
1244			} else {
1245				bzero(as.pfras_packets, sizeof(as.pfras_packets));
1246				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1247				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1248			}
1249			splx(s);
1250			as.pfras_tzero = ke->pfrke_tzero;
1251
1252			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
1253				return (EFAULT);
1254			w->pfrw_astats++;
1255		}
1256		break;
1257	case PFRW_POOL_GET:
1258		if (ke->pfrke_not)
1259			break; /* negative entries are ignored */
1260		if (!w->pfrw_cnt--) {
1261			w->pfrw_kentry = ke;
1262			return (1); /* finish search */
1263		}
1264		break;
1265	case PFRW_DYNADDR_UPDATE:
1266		if (ke->pfrke_af == AF_INET) {
1267			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1268				break;
1269#ifdef __FreeBSD__
1270			pfr_prepare_network(&V_pfr_mask, AF_INET, ke->pfrke_net);
1271#else
1272			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1273#endif
1274			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1275			    &ke->pfrke_sa, AF_INET);
1276			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1277#ifdef __FreeBSD__
1278			    &V_pfr_mask, AF_INET);
1279#else
1280			    &pfr_mask, AF_INET);
1281#endif
1282		} else if (ke->pfrke_af == AF_INET6){
1283			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1284				break;
1285#ifdef __FreeBSD__
1286			pfr_prepare_network(&V_pfr_mask, AF_INET6, ke->pfrke_net);
1287#else
1288			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1289#endif
1290			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1291			    &ke->pfrke_sa, AF_INET6);
1292			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1293#ifdef __FreeBSD__
1294			    &V_pfr_mask, AF_INET6);
1295#else
1296			    &pfr_mask, AF_INET6);
1297#endif
1298		}
1299		break;
1300	}
1301	return (0);
1302}
1303
/*
 * DIOCRCLRTABLES backend: deactivate all user-created tables matched
 * by 'filter' (reserved-anchor tables are preserved).  The tables are
 * collected on a work queue first, then released in one pass; with
 * PFR_FLAG_DUMMY only the count is computed.  *ndel receives the
 * number of tables removed.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		/* never remove tables that live in the reserved anchor */
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		/* schedule flag change; pfr_setflags_ktables applies it */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1341
/*
 * DIOCRADDTABLES backend: create (or re-activate) 'size' tables from
 * the user array 'tbl'.  New tables are staged on 'addq', flag changes
 * for existing-but-inactive tables on 'changeq'; nothing is committed
 * until all input entries validate (senderr unwinds via _bad).  A
 * table inside an anchor also gets a root table in the main ruleset,
 * created here if necessary.  *nadd receives the number of additions.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet: create and stage it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already staged in this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* maybe the root is already staged on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* table exists but is inactive: re-activate it */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	/* roll back: only staged (not yet inserted) tables are destroyed */
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1423
/*
 * DIOCRDELTABLES backend: deactivate the 'size' tables named in the
 * user array 'tbl'.  Matching active tables are queued and their
 * ACTIVE flag is cleared in one pass; duplicates within the request
 * are skipped.  *ndel receives the number of tables removed.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip if the same table appears twice in the input */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1463
/*
 * DIOCRGETTABLES backend: copy the definitions of all tables matched
 * by 'filter' to the user buffer 'tbl'.  If the buffer is too small
 * (*size < count), only *size is updated so the caller can retry with
 * a bigger buffer.  On success *size is set to the number of entries
 * written.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	/* count and tree contents must agree; n != 0 means corruption */
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1496
/*
 * DIOCRGETTSTATS backend: copy the statistics of all tables matched by
 * 'filter' to the user buffer 'tbl', optionally clearing them
 * (PFR_FLAG_CLSTATS) afterwards.  With PFR_FLAG_ATOMIC the whole scan
 * runs at splsoftnet; otherwise each copy is individually protected.
 * Like pfr_get_tables(), an undersized buffer only reports the
 * required size via *size.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		/* non-atomic mode: raise spl just for this copy */
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	/* count and tree contents must agree; n != 0 means corruption */
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1547
/*
 * DIOCRCLRTSTATS backend: reset the statistics of the 'size' tables
 * named in the user array 'tbl'; with PFR_FLAG_ADDRSTOO the
 * per-address counters are cleared as well.  *nzero receives the
 * number of tables found and cleared.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1581
/*
 * DIOCRSETTFLAGS backend: set/clear user flags on the 'size' tables
 * named in 'tbl'.  setflag and clrflag must be disjoint subsets of
 * PFR_TFLAG_USRMASK.  Clearing PERSIST on an unreferenced table
 * deactivates it (counted in *ndel); other modifications are counted
 * in *nchange.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no effective change: nothing to do */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			/* skip duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST on an unreferenced table kills it */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1635
/*
 * DIOCXBEGIN helper for tables: open a table transaction on the
 * ruleset named by 'trs'.  Any leftover INACTIVE tables from a
 * previous (aborted) transaction are flushed, and a fresh ticket is
 * issued via *ticket.  *ndel receives the number of stale tables
 * removed.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		/* stale shadow from an earlier transaction: drop it */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		/* dummy run: do not keep an empty ruleset around */
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1668
/*
 * DIOCXDEFINE helper: define table 'tbl' (and optionally its 'size'
 * addresses) inside the open transaction identified by 'ticket'.  The
 * addresses are loaded into a shadow table that replaces the real
 * contents on commit.  New tables (and, for anchored tables, their
 * root table) are staged on 'tableq'.  *nadd/*naddr receive the table
 * and address counts.  senderr unwinds via _bad on any input error.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	/* addresses may only be passed together with ADDRSTOO */
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	/* the transaction must be open and the ticket current */
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		/* existing table, first definition in this transaction */
		xadd++;
_skip:
	/* build the shadow table holding the transaction's addresses */
	shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* silently ignore duplicates */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* redefinition: replace any previous shadow */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1770
1771int
1772pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1773{
1774	struct pfr_ktableworkq	 workq;
1775	struct pfr_ktable	*p;
1776	struct pf_ruleset	*rs;
1777	int			 xdel = 0;
1778
1779	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1780	rs = pf_find_ruleset(trs->pfrt_anchor);
1781	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1782		return (0);
1783	SLIST_INIT(&workq);
1784	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1785		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1786		    pfr_skip_table(trs, p, 0))
1787			continue;
1788		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1789		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1790		xdel++;
1791	}
1792	if (!(flags & PFR_FLAG_DUMMY)) {
1793		pfr_setflags_ktables(&workq);
1794		rs->topen = 0;
1795		pf_remove_if_empty_ruleset(rs);
1796	}
1797	if (ndel != NULL)
1798		*ndel = xdel;
1799	return (0);
1800}
1801
/*
 * DIOCXCOMMIT helper: commit the table transaction identified by
 * 'ticket' on ruleset 'trs'.  Every INACTIVE table in the ruleset has
 * its shadow merged into the live table by pfr_commit_ktable().
 * *nadd counts newly activated tables, *nchange replaced ones.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		/* safe traversal: commit may unlink the current entry */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1848
/*
 * Merge the shadow table of 'kt' into the live table and activate it.
 * Three cases: (1) the transaction carried no addresses - just clear
 * stats on first activation; (2) 'kt' is already active - do an
 * incremental mark-and-sweep merge so unchanged entries keep their
 * statistics; (3) 'kt' is empty - simply swap the radix trees.  The
 * shadow is destroyed in all cases.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* mark live entries; unmarked ones will be deleted below */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* present in both: keep live entry */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* only in shadow: becomes a new entry */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* combine shadow's user flags with kt's set flags; go ACTIVE */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1907
1908int
1909pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1910{
1911	int i;
1912
1913	if (!tbl->pfrt_name[0])
1914		return (-1);
1915	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1916		 return (-1);
1917	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1918		return (-1);
1919	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1920		if (tbl->pfrt_name[i])
1921			return (-1);
1922	if (pfr_fix_anchor(tbl->pfrt_anchor))
1923		return (-1);
1924	if (tbl->pfrt_flags & ~allowedflags)
1925		return (-1);
1926	return (0);
1927}
1928
1929/*
1930 * Rewrite anchors referenced by tables to remove slashes
1931 * and check for validity.
1932 */
1933int
1934pfr_fix_anchor(char *anchor)
1935{
1936	size_t siz = MAXPATHLEN;
1937	int i;
1938
1939	if (anchor[0] == '/') {
1940		char *path;
1941		int off;
1942
1943		path = anchor;
1944		off = 1;
1945		while (*++path == '/')
1946			off++;
1947		bcopy(path, anchor, siz - off);
1948		memset(anchor + siz - off, 0, off);
1949	}
1950	if (anchor[siz - 1])
1951		return (-1);
1952	for (i = strlen(anchor); i < siz; i++)
1953		if (anchor[i])
1954			return (-1);
1955	return (0);
1956}
1957
1958int
1959pfr_table_count(struct pfr_table *filter, int flags)
1960{
1961	struct pf_ruleset *rs;
1962
1963	if (flags & PFR_FLAG_ALLRSETS)
1964		return (pfr_ktable_cnt);
1965	if (filter->pfrt_anchor[0]) {
1966		rs = pf_find_ruleset(filter->pfrt_anchor);
1967		return ((rs != NULL) ? rs->tables : -1);
1968	}
1969	return (pf_main_ruleset.tables);
1970}
1971
1972int
1973pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1974{
1975	if (flags & PFR_FLAG_ALLRSETS)
1976		return (0);
1977	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1978		return (1);
1979	return (0);
1980}
1981
1982void
1983pfr_insert_ktables(struct pfr_ktableworkq *workq)
1984{
1985	struct pfr_ktable	*p;
1986
1987	SLIST_FOREACH(p, workq, pfrkt_workq)
1988		pfr_insert_ktable(p);
1989}
1990
1991void
1992pfr_insert_ktable(struct pfr_ktable *kt)
1993{
1994	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1995	pfr_ktable_cnt++;
1996	if (kt->pfrkt_root != NULL)
1997		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1998			pfr_setflags_ktable(kt->pfrkt_root,
1999			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
2000}
2001
2002void
2003pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2004{
2005	struct pfr_ktable	*p, *q;
2006
2007	for (p = SLIST_FIRST(workq); p; p = q) {
2008		q = SLIST_NEXT(p, pfrkt_workq);
2009		pfr_setflags_ktable(p, p->pfrkt_nflags);
2010	}
2011}
2012
/*
 * Apply new flags 'newf' to table 'kt'.  A table that keeps none of
 * the SETMASK flags is removed from the tree and destroyed (releasing
 * its root-table reference).  Losing ACTIVE flushes all addresses;
 * losing INACTIVE destroys any pending shadow table.  May recurse on
 * the root table when its last anchor reference goes away.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	/* neither referenced nor persistent: cannot stay active */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* no reason left to keep the table: destroy it */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt)
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		/* deactivation: flush the address contents */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		/* transaction over: drop the pending shadow */
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
2044
2045void
2046pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
2047{
2048	struct pfr_ktable	*p;
2049
2050	SLIST_FOREACH(p, workq, pfrkt_workq)
2051		pfr_clstats_ktable(p, tzero, recurse);
2052}
2053
2054void
2055pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
2056{
2057	struct pfr_kentryworkq	 addrq;
2058	int			 s;
2059
2060	if (recurse) {
2061		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2062		pfr_clstats_kentries(&addrq, tzero, 0);
2063	}
2064	s = splsoftnet();
2065	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
2066	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
2067	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
2068	splx(s);
2069	kt->pfrkt_tzero = tzero;
2070}
2071
/*
 * Allocate and initialize a kernel table from the user description
 * 'tbl'.  'intr' selects a non-sleeping pool allocation (caller may
 * be in interrupt context); 'attachruleset' links the table to (and
 * reference-counts) its ruleset.  Both radix heads (v4/v6) are
 * initialized.  Returns NULL on allocation failure.
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset,
    int intr)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

#ifdef __FreeBSD__
	kt = pool_get(&V_pfr_ktable_pl, PR_NOWAIT|PR_ZERO);
#else
	if (intr)
		kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
	else
		kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
#endif
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* key length is the address offset within the sockaddr, in bits */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
2112
2113void
2114pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2115{
2116	struct pfr_ktable	*p, *q;
2117
2118	for (p = SLIST_FIRST(workq); p; p = q) {
2119		q = SLIST_NEXT(p, pfrkt_workq);
2120		pfr_destroy_ktable(p, flushaddr);
2121	}
2122}
2123
/*
 * Release all resources of table 'kt': optionally its addresses
 * ('flushaddr'), both radix heads, any pending shadow table, the
 * ruleset reference, and finally the table structure itself.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	/* FreeBSD radix heads carry a lock that must be torn down first */
	if (kt->pfrkt_ip4 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	}
	if (kt->pfrkt_ip6 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	}
#else
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
#endif
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
#ifdef __FreeBSD__
	pool_put(&V_pfr_ktable_pl, kt);
#else
	pool_put(&pfr_ktable_pl, kt);
#endif
}
2161
2162int
2163pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2164{
2165	int d;
2166
2167	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2168		return (d);
2169	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2170}
2171
2172struct pfr_ktable *
2173pfr_lookup_table(struct pfr_table *tbl)
2174{
2175	/* struct pfr_ktable start like a struct pfr_table */
2176	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2177	    (struct pfr_ktable *)tbl));
2178}
2179
/*
 * Packet-path lookup: return non-zero when address 'a' matches table
 * 'kt' (negated entries count as non-matches).  Anchored tables defer
 * to their root table.  Updates the table's match/nomatch counters.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	/* anchored tables are looked up through their root */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
#ifdef __FreeBSD__
		V_pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&V_pfr_sin, kt->pfrkt_ip4);
#else
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
#endif
		/* the radix root nodes are not real entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
#ifdef __FreeBSD__
		bcopy(a, &V_pfr_sin6.sin6_addr, sizeof(V_pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&V_pfr_sin6, kt->pfrkt_ip6);
#else
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
#endif
		/* the radix root nodes are not real entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2226
/*
 * Packet-path statistics update: account 'len' bytes for a packet
 * matched against table 'kt'.  If the lookup result disagrees with
 * 'notrule' the packet is booked under PFR_OP_XPASS instead.  With
 * PFR_TFLAG_COUNTERS, per-address counters are updated too (allocated
 * lazily, best effort).
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	/* anchored tables are looked up through their root */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
#ifdef __FreeBSD__
		V_pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&V_pfr_sin, kt->pfrkt_ip4);
#else
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
#endif
		/* the radix root nodes are not real entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
#ifdef __FreeBSD__
		bcopy(a, &V_pfr_sin6.sin6_addr, sizeof(V_pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&V_pfr_sin6, kt->pfrkt_ip6);
#else
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
#endif
		/* the radix root nodes are not real entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	/* lookup result contradicts the rule's expectation */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		/* lazily allocate per-address counters; may fail silently */
		if (ke->pfrke_counters == NULL)
#ifdef __FreeBSD__
			ke->pfrke_counters = pool_get(&V_pfr_kcounters_pl,
#else
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
#endif
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
2290
2291struct pfr_ktable *
2292pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
2293{
2294	struct pfr_ktable	*kt, *rt;
2295	struct pfr_table	 tbl;
2296	struct pf_anchor	*ac = rs->anchor;
2297
2298	bzero(&tbl, sizeof(tbl));
2299	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2300	if (ac != NULL)
2301		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2302	kt = pfr_lookup_table(&tbl);
2303	if (kt == NULL) {
2304		kt = pfr_create_ktable(&tbl, time_second, 1, intr);
2305		if (kt == NULL)
2306			return (NULL);
2307		if (ac != NULL) {
2308			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2309			rt = pfr_lookup_table(&tbl);
2310			if (rt == NULL) {
2311				rt = pfr_create_ktable(&tbl, 0, 1, intr);
2312				if (rt == NULL) {
2313					pfr_destroy_ktable(kt, 0);
2314					return (NULL);
2315				}
2316				pfr_insert_ktable(rt);
2317			}
2318			kt->pfrkt_root = rt;
2319		}
2320		pfr_insert_ktable(kt);
2321	}
2322	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2323		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2324	return (kt);
2325}
2326
2327void
2328pfr_detach_table(struct pfr_ktable *kt)
2329{
2330	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2331		printf("pfr_detach_table: refcount = %d.\n",
2332		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2333	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2334		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2335}
2336
2337int
2338pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2339    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2340{
2341#ifdef __FreeBSD__
2342	struct pfr_kentry	*ke, *ke2 = NULL;
2343	struct pf_addr		*addr = NULL;
2344#else
2345	struct pfr_kentry	*ke, *ke2;
2346	struct pf_addr		*addr;
2347#endif
2348	union sockaddr_union	 mask;
2349	int			 idx = -1, use_counter = 0;
2350
2351#ifdef __FreeBSD__
2352	if (af == AF_INET)
2353		addr = (struct pf_addr *)&V_pfr_sin.sin_addr;
2354	else if (af == AF_INET6)
2355		addr = (struct pf_addr *)&V_pfr_sin6.sin6_addr;
2356#else
2357	if (af == AF_INET)
2358		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2359	else if (af == AF_INET6)
2360		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2361#endif
2362	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2363		kt = kt->pfrkt_root;
2364	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2365		return (-1);
2366
2367	if (pidx != NULL)
2368		idx = *pidx;
2369	if (counter != NULL && idx >= 0)
2370		use_counter = 1;
2371	if (idx < 0)
2372		idx = 0;
2373
2374_next_block:
2375	ke = pfr_kentry_byidx(kt, idx, af);
2376	if (ke == NULL) {
2377		kt->pfrkt_nomatch++;
2378		return (1);
2379	}
2380#ifdef __FreeBSD__
2381	pfr_prepare_network(&V_pfr_mask, af, ke->pfrke_net);
2382#else
2383	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2384#endif
2385	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2386#ifdef __FreeBSD__
2387	*rmask = SUNION2PF(&V_pfr_mask, af);
2388#else
2389	*rmask = SUNION2PF(&pfr_mask, af);
2390#endif
2391
2392	if (use_counter) {
2393		/* is supplied address within block? */
2394		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2395			/* no, go to next block in table */
2396			idx++;
2397			use_counter = 0;
2398			goto _next_block;
2399		}
2400		PF_ACPY(addr, counter, af);
2401	} else {
2402		/* use first address of block */
2403		PF_ACPY(addr, *raddr, af);
2404	}
2405
2406	if (!KENTRY_NETWORK(ke)) {
2407		/* this is a single IP address - no possible nested block */
2408		PF_ACPY(counter, addr, af);
2409		*pidx = idx;
2410		kt->pfrkt_match++;
2411		return (0);
2412	}
2413	for (;;) {
2414		/* we don't want to use a nested block */
2415#ifdef __FreeBSD__
2416		if (af == AF_INET)
2417			ke2 = (struct pfr_kentry *)rn_match(&V_pfr_sin,
2418			    kt->pfrkt_ip4);
2419		else if (af == AF_INET6)
2420			ke2 = (struct pfr_kentry *)rn_match(&V_pfr_sin6,
2421			    kt->pfrkt_ip6);
2422#else
2423		if (af == AF_INET)
2424			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2425			    kt->pfrkt_ip4);
2426		else if (af == AF_INET6)
2427			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2428			    kt->pfrkt_ip6);
2429#endif
2430		/* no need to check KENTRY_RNF_ROOT() here */
2431		if (ke2 == ke) {
2432			/* lookup return the same block - perfect */
2433			PF_ACPY(counter, addr, af);
2434			*pidx = idx;
2435			kt->pfrkt_match++;
2436			return (0);
2437		}
2438
2439		/* we need to increase the counter past the nested block */
2440		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2441#ifdef __FreeBSD__
2442		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &V_pfr_ffaddr, af);
2443#else
2444		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2445#endif
2446		PF_AINC(addr, af);
2447		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2448			/* ok, we reached the end of our main block */
2449			/* go to next block in table */
2450			idx++;
2451			use_counter = 0;
2452			goto _next_block;
2453		}
2454	}
2455}
2456
/*
 * Return the idx'th entry of the given address family in table kt, or
 * NULL when af is unsupported.  The walk uses PFRW_POOL_GET with the
 * counter preloaded to idx; pfr_walktree presumably records the entry
 * once the counter runs out (returns NULL if idx is out of range).
 * Used by pfr_pool_get() to step through a table's blocks.
 */
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
#ifdef __FreeBSD__
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
#ifdef __FreeBSD__
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
2489
/*
 * Rebuild the dynamic-address state 'dyn' from the current contents of
 * table kt: reset the v4/v6 address counts and walk the matching radix
 * tree(s) with PFRW_DYNADDR_UPDATE (pfr_walktree fills 'dyn' back in).
 * The whole update runs at splsoftnet so packet processing never sees
 * a half-updated state.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	/* pfid_af == 0 means "any family": walk both trees. */
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
#ifdef __FreeBSD__
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
#ifdef __FreeBSD__
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	splx(s);
}
2517