pf_table.c revision 361453
1/*-
2 * Copyright (c) 2002 Cedric Berger
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 *    - Redistributions of source code must retain the above copyright
10 *      notice, this list of conditions and the following disclaimer.
11 *    - Redistributions in binary form must reproduce the above
12 *      copyright notice, this list of conditions and the following
13 *      disclaimer in the documentation and/or other materials provided
14 *      with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
20 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 *
29 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/11/sys/netpfil/pf/pf_table.c 361453 2020-05-25 12:48:09Z markj $");
34
35#include "opt_inet.h"
36#include "opt_inet6.h"
37
38#include <sys/param.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/mbuf.h>
43#include <sys/mutex.h>
44#include <sys/refcount.h>
45#include <sys/socket.h>
46#include <vm/uma.h>
47
48#include <net/if.h>
49#include <net/vnet.h>
50#include <net/pfvar.h>
51
52#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
53
54#define	ACCEPT_FLAGS(flags, oklist)		\
55	do {					\
56		if ((flags & ~(oklist)) &	\
57		    PFR_FLAG_ALLMASK)		\
58			return (EINVAL);	\
59	} while (0)
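
/*
 * Usage sketch (illustrative addition, not original commentary): a
 * handler such as pfr_clr_addrs() starts with
 * ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); any bit that is outside the
 * per-ioctl 'oklist' but inside PFR_FLAG_ALLMASK fails the ioctl with
 * EINVAL, while unknown bits above PFR_FLAG_ALLMASK are ignored.
 */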
60
61#define	FILLIN_SIN(sin, addr)			\
62	do {					\
63		(sin).sin_len = sizeof(sin);	\
64		(sin).sin_family = AF_INET;	\
65		(sin).sin_addr = (addr);	\
66	} while (0)
67
68#define	FILLIN_SIN6(sin6, addr)			\
69	do {					\
70		(sin6).sin6_len = sizeof(sin6);	\
71		(sin6).sin6_family = AF_INET6;	\
72		(sin6).sin6_addr = (addr);	\
73	} while (0)
74
75#define	SWAP(type, a1, a2)			\
76	do {					\
77		type tmp = a1;			\
78		a1 = a2;			\
79		a2 = tmp;			\
80	} while (0)
81
82#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
83    (struct pf_addr *)&(su)->sin.sin_addr :	\
84    (struct pf_addr *)&(su)->sin6.sin6_addr)
85
86#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
87#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
88#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
89#define	KENTRY_RNF_ROOT(ke) \
90		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
91
92#define	NO_ADDRESSES		(-1)
93#define	ENQUEUE_UNMARKED_ONLY	(1)
94#define	INVERT_NEG_FLAG		(1)
95
96struct pfr_walktree {
97	enum pfrw_op {
98		PFRW_MARK,
99		PFRW_SWEEP,
100		PFRW_ENQUEUE,
101		PFRW_GET_ADDRS,
102		PFRW_GET_ASTATS,
103		PFRW_POOL_GET,
104		PFRW_DYNADDR_UPDATE
105	}	 pfrw_op;
106	union {
107		struct pfr_addr		*pfrw1_addr;
108		struct pfr_astats	*pfrw1_astats;
109		struct pfr_kentryworkq	*pfrw1_workq;
110		struct pfr_kentry	*pfrw1_kentry;
111		struct pfi_dynaddr	*pfrw1_dyn;
112	}	 pfrw_1;
113	int	 pfrw_free;
114	int	 pfrw_flags;
115};
116#define	pfrw_addr	pfrw_1.pfrw1_addr
117#define	pfrw_astats	pfrw_1.pfrw1_astats
118#define	pfrw_workq	pfrw_1.pfrw1_workq
119#define	pfrw_kentry	pfrw_1.pfrw1_kentry
120#define	pfrw_dyn	pfrw_1.pfrw1_dyn
121#define	pfrw_cnt	pfrw_free
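
/*
 * Note (explanatory addition): pfrw_free and pfrw_cnt alias the same
 * field.  PFRW_GET_ADDRS/PFRW_GET_ASTATS count it down from the table
 * size to detect overflow, PFRW_ENQUEUE counts matches up, and
 * PFRW_POOL_GET counts down to zero to stop at a given index.
 */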
122
123#define	senderr(e)	do { rv = (e); goto _bad; } while (0)
124
125static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
126static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
127#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
128
129static struct pf_addr	 pfr_ffaddr = {
130	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
131};
132
133static void		 pfr_copyout_astats(struct pfr_astats *,
134			    const struct pfr_kentry *,
135			    const struct pfr_walktree *);
136static void		 pfr_copyout_addr(struct pfr_addr *,
137			    const struct pfr_kentry *ke);
138static int		 pfr_validate_addr(struct pfr_addr *);
139static void		 pfr_enqueue_addrs(struct pfr_ktable *,
140			    struct pfr_kentryworkq *, int *, int);
141static void		 pfr_mark_addrs(struct pfr_ktable *);
142static struct pfr_kentry
143			*pfr_lookup_addr(struct pfr_ktable *,
144			    struct pfr_addr *, int);
145static bool		 pfr_create_kentry_counter(struct pfr_kentry *, int,
146			    int);
147static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
148static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
149static void		 pfr_destroy_kentry_counter(struct pfr_kcounters *,
150			    int, int);
151static void		 pfr_destroy_kentry(struct pfr_kentry *);
152static void		 pfr_insert_kentries(struct pfr_ktable *,
153			    struct pfr_kentryworkq *, long);
154static void		 pfr_remove_kentries(struct pfr_ktable *,
155			    struct pfr_kentryworkq *);
156static void		 pfr_clstats_kentries(struct pfr_ktable *,
157			    struct pfr_kentryworkq *, long, int);
158static void		 pfr_reset_feedback(struct pfr_addr *, int);
159static void		 pfr_prepare_network(union sockaddr_union *, int, int);
160static int		 pfr_route_kentry(struct pfr_ktable *,
161			    struct pfr_kentry *);
162static int		 pfr_unroute_kentry(struct pfr_ktable *,
163			    struct pfr_kentry *);
164static int		 pfr_walktree(struct radix_node *, void *);
165static int		 pfr_validate_table(struct pfr_table *, int, int);
166static int		 pfr_fix_anchor(char *);
167static void		 pfr_commit_ktable(struct pfr_ktable *, long);
168static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
169static void		 pfr_insert_ktable(struct pfr_ktable *);
170static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
171static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
172static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173			    int);
174static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
175static struct pfr_ktable
176			*pfr_create_ktable(struct pfr_table *, long, int);
177static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
178static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
179static int		 pfr_ktable_compare(struct pfr_ktable *,
180			    struct pfr_ktable *);
181static struct pfr_ktable
182			*pfr_lookup_table(struct pfr_table *);
183static void		 pfr_clean_node_mask(struct pfr_ktable *,
184			    struct pfr_kentryworkq *);
185static int		 pfr_skip_table(struct pfr_table *,
186			    struct pfr_ktable *, int);
187static struct pfr_kentry
188			*pfr_kentry_byidx(struct pfr_ktable *, int, int);
189
190static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
191static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
192
193struct pfr_ktablehead	 pfr_ktables;
194struct pfr_table	 pfr_nulltable;
195int			 pfr_ktable_cnt;
196
197void
198pfr_initialize(void)
199{
200
201	V_pfr_kentry_z = uma_zcreate("pf table entries",
202	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
203	    0);
204	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
205	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
206}
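
/*
 * Note (explanatory addition): the zone/limit pair registered above
 * backs pf's "set limit table-entries" knob; pf's ioctl code caps the
 * zone with uma_zone_set_max(), after which uma_zalloc() on
 * V_pfr_kentry_z returns NULL once the limit is reached.
 */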
207
208void
209pfr_cleanup(void)
210{
211
212	uma_zdestroy(V_pfr_kentry_z);
213}
214
215int
216pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
217{
218	struct pfr_ktable	*kt;
219	struct pfr_kentryworkq	 workq;
220
221	PF_RULES_WASSERT();
222
223	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
224	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
225		return (EINVAL);
226	kt = pfr_lookup_table(tbl);
227	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
228		return (ESRCH);
229	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
230		return (EPERM);
231	pfr_enqueue_addrs(kt, &workq, ndel, 0);
232
233	if (!(flags & PFR_FLAG_DUMMY)) {
234		pfr_remove_kentries(kt, &workq);
235		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
236	}
237	return (0);
238}
239
240int
241pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
242    int *nadd, int flags)
243{
244	struct pfr_ktable	*kt, *tmpkt;
245	struct pfr_kentryworkq	 workq;
246	struct pfr_kentry	*p, *q;
247	struct pfr_addr		*ad;
248	int			 i, rv, xadd = 0;
249	long			 tzero = time_second;
250
251	PF_RULES_WASSERT();
252
253	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
254	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
255		return (EINVAL);
256	kt = pfr_lookup_table(tbl);
257	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
258		return (ESRCH);
259	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
260		return (EPERM);
261	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
262	if (tmpkt == NULL)
263		return (ENOMEM);
264	SLIST_INIT(&workq);
265	for (i = 0, ad = addr; i < size; i++, ad++) {
266		if (pfr_validate_addr(ad))
267			senderr(EINVAL);
268		p = pfr_lookup_addr(kt, ad, 1);
269		q = pfr_lookup_addr(tmpkt, ad, 1);
270		if (flags & PFR_FLAG_FEEDBACK) {
271			if (q != NULL)
272				ad->pfra_fback = PFR_FB_DUPLICATE;
273			else if (p == NULL)
274				ad->pfra_fback = PFR_FB_ADDED;
275			else if (p->pfrke_not != ad->pfra_not)
276				ad->pfra_fback = PFR_FB_CONFLICT;
277			else
278				ad->pfra_fback = PFR_FB_NONE;
279		}
280		if (p == NULL && q == NULL) {
281			p = pfr_create_kentry(ad,
282			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
283			if (p == NULL)
284				senderr(ENOMEM);
285			if (pfr_route_kentry(tmpkt, p)) {
286				pfr_destroy_kentry(p);
287				ad->pfra_fback = PFR_FB_NONE;
288			} else {
289				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
290				xadd++;
291			}
292		}
293	}
294	pfr_clean_node_mask(tmpkt, &workq);
295	if (!(flags & PFR_FLAG_DUMMY))
296		pfr_insert_kentries(kt, &workq, tzero);
297	else
298		pfr_destroy_kentries(&workq);
299	if (nadd != NULL)
300		*nadd = xadd;
301	pfr_destroy_ktable(tmpkt, 0);
302	return (0);
303_bad:
304	pfr_clean_node_mask(tmpkt, &workq);
305	pfr_destroy_kentries(&workq);
306	if (flags & PFR_FLAG_FEEDBACK)
307		pfr_reset_feedback(addr, size);
308	pfr_destroy_ktable(tmpkt, 0);
309	return (rv);
310}
311
312int
313pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
314    int *ndel, int flags)
315{
316	struct pfr_ktable	*kt;
317	struct pfr_kentryworkq	 workq;
318	struct pfr_kentry	*p;
319	struct pfr_addr		*ad;
320	int			 i, rv, xdel = 0, log = 1;
321
322	PF_RULES_WASSERT();
323
324	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
325	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
326		return (EINVAL);
327	kt = pfr_lookup_table(tbl);
328	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
329		return (ESRCH);
330	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
331		return (EPERM);
332	/*
333	 * There are two algorithms to choose from here.
334	 * With:
335	 *   n: number of addresses to delete
336	 *   N: number of addresses in the table
337	 *
338	 * one is O(N) and is better for large 'n',
339	 * one is O(n*LOG(N)) and is better for small 'n'.
340	 *
341	 * The following code tries to decide which one is best.
342	 */
343	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
344		log++;
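	/*
	 * Worked example (illustrative addition, not original commentary):
	 * for a table of N = 65536 entries the loop above leaves log = 18,
	 * so the full-scan branch below is taken when a single call deletes
	 * more than 65536/18 ~= 3640 addresses.
	 */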
345	if (size > kt->pfrkt_cnt/log) {
346		/* full table scan */
347		pfr_mark_addrs(kt);
348	} else {
349		/* iterate over addresses to delete */
350		for (i = 0, ad = addr; i < size; i++, ad++) {
351			if (pfr_validate_addr(ad))
352				return (EINVAL);
353			p = pfr_lookup_addr(kt, ad, 1);
354			if (p != NULL)
355				p->pfrke_mark = 0;
356		}
357	}
358	SLIST_INIT(&workq);
359	for (i = 0, ad = addr; i < size; i++, ad++) {
360		if (pfr_validate_addr(ad))
361			senderr(EINVAL);
362		p = pfr_lookup_addr(kt, ad, 1);
363		if (flags & PFR_FLAG_FEEDBACK) {
364			if (p == NULL)
365				ad->pfra_fback = PFR_FB_NONE;
366			else if (p->pfrke_not != ad->pfra_not)
367				ad->pfra_fback = PFR_FB_CONFLICT;
368			else if (p->pfrke_mark)
369				ad->pfra_fback = PFR_FB_DUPLICATE;
370			else
371				ad->pfra_fback = PFR_FB_DELETED;
372		}
373		if (p != NULL && p->pfrke_not == ad->pfra_not &&
374		    !p->pfrke_mark) {
375			p->pfrke_mark = 1;
376			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
377			xdel++;
378		}
379	}
380	if (!(flags & PFR_FLAG_DUMMY))
381		pfr_remove_kentries(kt, &workq);
382	if (ndel != NULL)
383		*ndel = xdel;
384	return (0);
385_bad:
386	if (flags & PFR_FLAG_FEEDBACK)
387		pfr_reset_feedback(addr, size);
388	return (rv);
389}
390
391int
392pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
393    int *size2, int *nadd, int *ndel, int *nchange, int flags,
394    u_int32_t ignore_pfrt_flags)
395{
396	struct pfr_ktable	*kt, *tmpkt;
397	struct pfr_kentryworkq	 addq, delq, changeq;
398	struct pfr_kentry	*p, *q;
399	struct pfr_addr		 ad;
400	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
401	long			 tzero = time_second;
402
403	PF_RULES_WASSERT();
404
405	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
406	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
407	    PFR_FLAG_USERIOCTL))
408		return (EINVAL);
409	kt = pfr_lookup_table(tbl);
410	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
411		return (ESRCH);
412	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
413		return (EPERM);
414	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
415	if (tmpkt == NULL)
416		return (ENOMEM);
417	pfr_mark_addrs(kt);
418	SLIST_INIT(&addq);
419	SLIST_INIT(&delq);
420	SLIST_INIT(&changeq);
421	for (i = 0; i < size; i++) {
422		/*
423		 * XXXGL: understand pf_if usage of this function
424		 * and make ad a moving pointer
425		 */
426		bcopy(addr + i, &ad, sizeof(ad));
427		if (pfr_validate_addr(&ad))
428			senderr(EINVAL);
429		ad.pfra_fback = PFR_FB_NONE;
430		p = pfr_lookup_addr(kt, &ad, 1);
431		if (p != NULL) {
432			if (p->pfrke_mark) {
433				ad.pfra_fback = PFR_FB_DUPLICATE;
434				goto _skip;
435			}
436			p->pfrke_mark = 1;
437			if (p->pfrke_not != ad.pfra_not) {
438				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
439				ad.pfra_fback = PFR_FB_CHANGED;
440				xchange++;
441			}
442		} else {
443			q = pfr_lookup_addr(tmpkt, &ad, 1);
444			if (q != NULL) {
445				ad.pfra_fback = PFR_FB_DUPLICATE;
446				goto _skip;
447			}
448			p = pfr_create_kentry(&ad,
449			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
450			if (p == NULL)
451				senderr(ENOMEM);
452			if (pfr_route_kentry(tmpkt, p)) {
453				pfr_destroy_kentry(p);
454				ad.pfra_fback = PFR_FB_NONE;
455			} else {
456				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
457				ad.pfra_fback = PFR_FB_ADDED;
458				xadd++;
459			}
460		}
461_skip:
462		if (flags & PFR_FLAG_FEEDBACK)
463			bcopy(&ad, addr + i, sizeof(ad));
464	}
465	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
466	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
467		if (*size2 < size+xdel) {
468			*size2 = size+xdel;
469			senderr(0);
470		}
471		i = 0;
472		SLIST_FOREACH(p, &delq, pfrke_workq) {
473			pfr_copyout_addr(&ad, p);
474			ad.pfra_fback = PFR_FB_DELETED;
475			bcopy(&ad, addr + size + i, sizeof(ad));
476			i++;
477		}
478	}
479	pfr_clean_node_mask(tmpkt, &addq);
480	if (!(flags & PFR_FLAG_DUMMY)) {
481		pfr_insert_kentries(kt, &addq, tzero);
482		pfr_remove_kentries(kt, &delq);
483		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
484	} else
485		pfr_destroy_kentries(&addq);
486	if (nadd != NULL)
487		*nadd = xadd;
488	if (ndel != NULL)
489		*ndel = xdel;
490	if (nchange != NULL)
491		*nchange = xchange;
492	if ((flags & PFR_FLAG_FEEDBACK) && size2)
493		*size2 = size+xdel;
494	pfr_destroy_ktable(tmpkt, 0);
495	return (0);
496_bad:
497	pfr_clean_node_mask(tmpkt, &addq);
498	pfr_destroy_kentries(&addq);
499	if (flags & PFR_FLAG_FEEDBACK)
500		pfr_reset_feedback(addr, size);
501	pfr_destroy_ktable(tmpkt, 0);
502	return (rv);
503}
504
505int
506pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
507	int *nmatch, int flags)
508{
509	struct pfr_ktable	*kt;
510	struct pfr_kentry	*p;
511	struct pfr_addr		*ad;
512	int			 i, xmatch = 0;
513
514	PF_RULES_RASSERT();
515
516	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
517	if (pfr_validate_table(tbl, 0, 0))
518		return (EINVAL);
519	kt = pfr_lookup_table(tbl);
520	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
521		return (ESRCH);
522
523	for (i = 0, ad = addr; i < size; i++, ad++) {
524		if (pfr_validate_addr(ad))
525			return (EINVAL);
526		if (ADDR_NETWORK(ad))
527			return (EINVAL);
528		p = pfr_lookup_addr(kt, ad, 0);
529		if (flags & PFR_FLAG_REPLACE)
530			pfr_copyout_addr(ad, p);
531		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
532		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
533		if (p != NULL && !p->pfrke_not)
534			xmatch++;
535	}
536	if (nmatch != NULL)
537		*nmatch = xmatch;
538	return (0);
539}
540
541int
542pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
543	int flags)
544{
545	struct pfr_ktable	*kt;
546	struct pfr_walktree	 w;
547	int			 rv;
548
549	PF_RULES_RASSERT();
550
551	ACCEPT_FLAGS(flags, 0);
552	if (pfr_validate_table(tbl, 0, 0))
553		return (EINVAL);
554	kt = pfr_lookup_table(tbl);
555	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
556		return (ESRCH);
557	if (kt->pfrkt_cnt > *size) {
558		*size = kt->pfrkt_cnt;
559		return (0);
560	}
561
562	bzero(&w, sizeof(w));
563	w.pfrw_op = PFRW_GET_ADDRS;
564	w.pfrw_addr = addr;
565	w.pfrw_free = kt->pfrkt_cnt;
566	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
567	if (!rv)
568		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
569		    pfr_walktree, &w);
570	if (rv)
571		return (rv);
572
573	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
574	    w.pfrw_free));
575
576	*size = kt->pfrkt_cnt;
577	return (0);
578}
579
580int
581pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
582	int flags)
583{
584	struct pfr_ktable	*kt;
585	struct pfr_walktree	 w;
586	struct pfr_kentryworkq	 workq;
587	int			 rv;
588	long			 tzero = time_second;
589
590	PF_RULES_RASSERT();
591
592	/* XXX PFR_FLAG_CLSTATS disabled */
593	ACCEPT_FLAGS(flags, 0);
594	if (pfr_validate_table(tbl, 0, 0))
595		return (EINVAL);
596	kt = pfr_lookup_table(tbl);
597	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
598		return (ESRCH);
599	if (kt->pfrkt_cnt > *size) {
600		*size = kt->pfrkt_cnt;
601		return (0);
602	}
603
604	bzero(&w, sizeof(w));
605	w.pfrw_op = PFRW_GET_ASTATS;
606	w.pfrw_astats = addr;
607	w.pfrw_free = kt->pfrkt_cnt;
608	/*
609	 * Flags below are for backward compatibility. It used to be possible
610	 * to have a table without per-entry counters. Now they are always
611	 * allocated; we just discard the data when reading it if the table
612	 * is not configured to have counters.
613	 */
614	w.pfrw_flags = kt->pfrkt_flags;
615	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
616	if (!rv)
617		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
618		    pfr_walktree, &w);
619	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
620		pfr_enqueue_addrs(kt, &workq, NULL, 0);
621		pfr_clstats_kentries(kt, &workq, tzero, 0);
622	}
623	if (rv)
624		return (rv);
625
626	if (w.pfrw_free) {
627		printf("pfr_get_astats: corruption detected (%d).\n",
628		    w.pfrw_free);
629		return (ENOTTY);
630	}
631	*size = kt->pfrkt_cnt;
632	return (0);
633}
634
635int
636pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
637    int *nzero, int flags)
638{
639	struct pfr_ktable	*kt;
640	struct pfr_kentryworkq	 workq;
641	struct pfr_kentry	*p;
642	struct pfr_addr		*ad;
643	int			 i, rv, xzero = 0;
644
645	PF_RULES_WASSERT();
646
647	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
648	if (pfr_validate_table(tbl, 0, 0))
649		return (EINVAL);
650	kt = pfr_lookup_table(tbl);
651	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
652		return (ESRCH);
653	SLIST_INIT(&workq);
654	for (i = 0, ad = addr; i < size; i++, ad++) {
655		if (pfr_validate_addr(ad))
656			senderr(EINVAL);
657		p = pfr_lookup_addr(kt, ad, 1);
658		if (flags & PFR_FLAG_FEEDBACK) {
659			ad->pfra_fback = (p != NULL) ?
660			    PFR_FB_CLEARED : PFR_FB_NONE;
661		}
662		if (p != NULL) {
663			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
664			xzero++;
665		}
666	}
667
668	if (!(flags & PFR_FLAG_DUMMY))
669		pfr_clstats_kentries(kt, &workq, 0, 0);
670	if (nzero != NULL)
671		*nzero = xzero;
672	return (0);
673_bad:
674	if (flags & PFR_FLAG_FEEDBACK)
675		pfr_reset_feedback(addr, size);
676	return (rv);
677}
678
679static int
680pfr_validate_addr(struct pfr_addr *ad)
681{
682	int i;
683
684	switch (ad->pfra_af) {
685#ifdef INET
686	case AF_INET:
687		if (ad->pfra_net > 32)
688			return (-1);
689		break;
690#endif /* INET */
691#ifdef INET6
692	case AF_INET6:
693		if (ad->pfra_net > 128)
694			return (-1);
695		break;
696#endif /* INET6 */
697	default:
698		return (-1);
699	}
700	if (ad->pfra_net < 128 &&
701		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
702			return (-1);
703	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
704		if (((caddr_t)ad)[i])
705			return (-1);
706	if (ad->pfra_not && ad->pfra_not != 1)
707		return (-1);
708	if (ad->pfra_fback)
709		return (-1);
710	return (0);
711}
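
/*
 * Example (illustrative addition): 10.1.2.3/24 is rejected above
 * because byte pfra_net/8 = 3 of the address still has host bits set;
 * the caller must submit 10.1.2.0/24.  The trailing loop likewise
 * requires all bytes beyond the prefix to be zero.
 */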
712
713static void
714pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
715	int *naddr, int sweep)
716{
717	struct pfr_walktree	w;
718
719	SLIST_INIT(workq);
720	bzero(&w, sizeof(w));
721	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
722	w.pfrw_workq = workq;
723	if (kt->pfrkt_ip4 != NULL)
724		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
725		    pfr_walktree, &w))
726			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
727	if (kt->pfrkt_ip6 != NULL)
728		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
729		    pfr_walktree, &w))
730			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
731	if (naddr != NULL)
732		*naddr = w.pfrw_cnt;
733}
734
735static void
736pfr_mark_addrs(struct pfr_ktable *kt)
737{
738	struct pfr_walktree	w;
739
740	bzero(&w, sizeof(w));
741	w.pfrw_op = PFRW_MARK;
742	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
743		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
744	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
745		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
746}
747
749static struct pfr_kentry *
750pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
751{
752	union sockaddr_union	 sa, mask;
753	struct radix_head	*head = NULL;
754	struct pfr_kentry	*ke;
755
756	PF_RULES_ASSERT();
757
758	bzero(&sa, sizeof(sa));
759	if (ad->pfra_af == AF_INET) {
760		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
761		head = &kt->pfrkt_ip4->rh;
762	} else if (ad->pfra_af == AF_INET6) {
763		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
764		head = &kt->pfrkt_ip6->rh;
765	}
766	if (ADDR_NETWORK(ad)) {
767		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
768		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
769		if (ke && KENTRY_RNF_ROOT(ke))
770			ke = NULL;
771	} else {
772		ke = (struct pfr_kentry *)rn_match(&sa, head);
773		if (ke && KENTRY_RNF_ROOT(ke))
774			ke = NULL;
775		if (exact && ke && KENTRY_NETWORK(ke))
776			ke = NULL;
777	}
778	return (ke);
779}
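
/*
 * Illustration (explanatory addition): with exact != 0, querying
 * 10.0.0.1 in a table that only holds 10.0.0.0/24 returns NULL, since
 * the best match is a network entry rather than the host itself; with
 * exact == 0 the /24 entry is returned.  Network queries go through
 * rn_lookup() and must match address and mask exactly.
 */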
780
781static bool
782pfr_create_kentry_counter(struct pfr_kentry *ke, int pfr_dir, int pfr_op)
783{
784	counter_u64_t c;
785
786	c = counter_u64_alloc(M_NOWAIT);
787	if (c == NULL)
788		return (false);
789	ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op] = c;
790	c = counter_u64_alloc(M_NOWAIT);
791	if (c == NULL)
792		return (false);
793	ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op] = c;
794	return (true);
795}
796
797static struct pfr_kentry *
798pfr_create_kentry(struct pfr_addr *ad, bool counters)
799{
800	struct pfr_kentry	*ke;
801	int pfr_dir, pfr_op;
802
803	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
804	if (ke == NULL)
805		return (NULL);
806
807	if (ad->pfra_af == AF_INET)
808		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
809	else if (ad->pfra_af == AF_INET6)
810		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
811	ke->pfrke_af = ad->pfra_af;
812	ke->pfrke_net = ad->pfra_net;
813	ke->pfrke_not = ad->pfra_not;
814	ke->pfrke_counters.pfrkc_tzero = 0;
815	if (counters)
816		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++)
817			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op++) {
818				if (!pfr_create_kentry_counter(ke, pfr_dir,
819				    pfr_op)) {
820					pfr_destroy_kentry(ke);
821					return (NULL);
822				}
823			}
824	return (ke);
825}
826
827static void
828pfr_destroy_kentries(struct pfr_kentryworkq *workq)
829{
830	struct pfr_kentry	*p, *q;
831
832	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
833		q = SLIST_NEXT(p, pfrke_workq);
834		pfr_destroy_kentry(p);
835	}
836}
837
838static void
839pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
840{
841	counter_u64_t c;
842
843	if ((c = kc->pfrkc_packets[pfr_dir][pfr_op]) != NULL)
844		counter_u64_free(c);
845	if ((c = kc->pfrkc_bytes[pfr_dir][pfr_op]) != NULL)
846		counter_u64_free(c);
847}
848
849static void
850pfr_destroy_kentry(struct pfr_kentry *ke)
851{
852	int pfr_dir, pfr_op;
853
854	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
855		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++)
856			pfr_destroy_kentry_counter(&ke->pfrke_counters,
857			    pfr_dir, pfr_op);
858
859	uma_zfree(V_pfr_kentry_z, ke);
860}
861
862static void
863pfr_insert_kentries(struct pfr_ktable *kt,
864    struct pfr_kentryworkq *workq, long tzero)
865{
866	struct pfr_kentry	*p;
867	int			 rv, n = 0;
868
869	SLIST_FOREACH(p, workq, pfrke_workq) {
870		rv = pfr_route_kentry(kt, p);
871		if (rv) {
872			printf("pfr_insert_kentries: cannot route entry "
873			    "(code=%d).\n", rv);
874			break;
875		}
876		p->pfrke_counters.pfrkc_tzero = tzero;
877		n++;
878	}
879	kt->pfrkt_cnt += n;
880}
881
882int
883pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
884{
885	struct pfr_kentry	*p;
886	int			 rv;
887
888	p = pfr_lookup_addr(kt, ad, 1);
889	if (p != NULL)
890		return (0);
891	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
892	if (p == NULL)
893		return (ENOMEM);
894
895	rv = pfr_route_kentry(kt, p);
896	if (rv)
897		return (rv);
898
899	p->pfrke_counters.pfrkc_tzero = tzero;
900	kt->pfrkt_cnt++;
901
902	return (0);
903}
904
905static void
906pfr_remove_kentries(struct pfr_ktable *kt,
907    struct pfr_kentryworkq *workq)
908{
909	struct pfr_kentry	*p;
910	int			 n = 0;
911
912	SLIST_FOREACH(p, workq, pfrke_workq) {
913		pfr_unroute_kentry(kt, p);
914		n++;
915	}
916	kt->pfrkt_cnt -= n;
917	pfr_destroy_kentries(workq);
918}
919
920static void
921pfr_clean_node_mask(struct pfr_ktable *kt,
922    struct pfr_kentryworkq *workq)
923{
924	struct pfr_kentry	*p;
925
926	SLIST_FOREACH(p, workq, pfrke_workq)
927		pfr_unroute_kentry(kt, p);
928}
929
930static void
931pfr_clear_kentry_counters(struct pfr_kentry *p, int pfr_dir, int pfr_op)
932{
933	counter_u64_zero(p->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op]);
934	counter_u64_zero(p->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op]);
935}
936
937static void
938pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
939    long tzero, int negchange)
940{
941	struct pfr_kentry	*p;
942	int			pfr_dir, pfr_op;
943
944	SLIST_FOREACH(p, workq, pfrke_workq) {
945		if (negchange)
946			p->pfrke_not = !p->pfrke_not;
947		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
948			for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++)
949				for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX;
950				    pfr_op++)
951					pfr_clear_kentry_counters(p, pfr_dir,
952					    pfr_op);
953		p->pfrke_counters.pfrkc_tzero = tzero;
954	}
955}
956
957static void
958pfr_reset_feedback(struct pfr_addr *addr, int size)
959{
960	struct pfr_addr	*ad;
961	int		i;
962
963	for (i = 0, ad = addr; i < size; i++, ad++)
964		ad->pfra_fback = PFR_FB_NONE;
965}
966
967static void
968pfr_prepare_network(union sockaddr_union *sa, int af, int net)
969{
970	int	i;
971
972	bzero(sa, sizeof(*sa));
973	if (af == AF_INET) {
974		sa->sin.sin_len = sizeof(sa->sin);
975		sa->sin.sin_family = AF_INET;
976		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
977	} else if (af == AF_INET6) {
978		sa->sin6.sin6_len = sizeof(sa->sin6);
979		sa->sin6.sin6_family = AF_INET6;
980		for (i = 0; i < 4; i++) {
981			if (net <= 32) {
982				sa->sin6.sin6_addr.s6_addr32[i] =
983				    net ? htonl(-1 << (32-net)) : 0;
984				break;
985			}
986			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
987			net -= 32;
988		}
989	}
990}
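
/*
 * Example (illustrative addition): AF_INET with net = 24 produces the
 * mask 255.255.255.0 (htonl(-1 << 8)); AF_INET6 with net = 80 fills
 * the first two 32-bit words with 0xffffffff, sets the third to
 * htonl(-1 << 16) and leaves the fourth zeroed by the bzero() above.
 */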
991
992static int
993pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
994{
995	union sockaddr_union	 mask;
996	struct radix_node	*rn;
997	struct radix_head	*head = NULL;
998
999	PF_RULES_WASSERT();
1000
1001	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
1002	if (ke->pfrke_af == AF_INET)
1003		head = &kt->pfrkt_ip4->rh;
1004	else if (ke->pfrke_af == AF_INET6)
1005		head = &kt->pfrkt_ip6->rh;
1006
1007	if (KENTRY_NETWORK(ke)) {
1008		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1009		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1010	} else
1011		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1012
1013	return (rn == NULL ? -1 : 0);
1014}
1015
1016static int
1017pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1018{
1019	union sockaddr_union	 mask;
1020	struct radix_node	*rn;
1021	struct radix_head	*head = NULL;
1022
1023	if (ke->pfrke_af == AF_INET)
1024		head = &kt->pfrkt_ip4->rh;
1025	else if (ke->pfrke_af == AF_INET6)
1026		head = &kt->pfrkt_ip6->rh;
1027
1028	if (KENTRY_NETWORK(ke)) {
1029		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1030		rn = rn_delete(&ke->pfrke_sa, &mask, head);
1031	} else
1032		rn = rn_delete(&ke->pfrke_sa, NULL, head);
1033
1034	if (rn == NULL) {
1035		printf("pfr_unroute_kentry: delete failed.\n");
1036		return (-1);
1037	}
1038	return (0);
1039}
1040
1041static void
1042pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1043{
1044	bzero(ad, sizeof(*ad));
1045	if (ke == NULL)
1046		return;
1047	ad->pfra_af = ke->pfrke_af;
1048	ad->pfra_net = ke->pfrke_net;
1049	ad->pfra_not = ke->pfrke_not;
1050	if (ad->pfra_af == AF_INET)
1051		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1052	else if (ad->pfra_af == AF_INET6)
1053		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1054}
1055
1056static void
1057pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1058    const struct pfr_walktree *w)
1059{
1060	int dir, op;
1061	const struct pfr_kcounters *kc = &ke->pfrke_counters;
1062
1063	pfr_copyout_addr(&as->pfras_a, ke);
1064	as->pfras_tzero = kc->pfrkc_tzero;
1065
1066	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
1067		bzero(as->pfras_packets, sizeof(as->pfras_packets));
1068		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1069		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1070		return;
1071	}
1072
1073	for (dir = 0; dir < PFR_DIR_MAX; dir ++) {
1074		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1075			as->pfras_packets[dir][op] =
1076			    counter_u64_fetch(kc->pfrkc_packets[dir][op]);
1077			as->pfras_bytes[dir][op] =
1078			    counter_u64_fetch(kc->pfrkc_bytes[dir][op]);
1079		}
1080	}
1081}
1082
1083static int
1084pfr_walktree(struct radix_node *rn, void *arg)
1085{
1086	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1087	struct pfr_walktree	*w = arg;
1088
1089	switch (w->pfrw_op) {
1090	case PFRW_MARK:
1091		ke->pfrke_mark = 0;
1092		break;
1093	case PFRW_SWEEP:
1094		if (ke->pfrke_mark)
1095			break;
1096		/* FALLTHROUGH */
1097	case PFRW_ENQUEUE:
1098		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1099		w->pfrw_cnt++;
1100		break;
1101	case PFRW_GET_ADDRS:
1102		if (w->pfrw_free-- > 0) {
1103			pfr_copyout_addr(w->pfrw_addr, ke);
1104			w->pfrw_addr++;
1105		}
1106		break;
1107	case PFRW_GET_ASTATS:
1108		if (w->pfrw_free-- > 0) {
1109			struct pfr_astats as;
1110
1111			pfr_copyout_astats(&as, ke, w);
1112
1113			bcopy(&as, w->pfrw_astats, sizeof(as));
1114			w->pfrw_astats++;
1115		}
1116		break;
1117	case PFRW_POOL_GET:
1118		if (ke->pfrke_not)
1119			break; /* negative entries are ignored */
1120		if (!w->pfrw_cnt--) {
1121			w->pfrw_kentry = ke;
1122			return (1); /* finish search */
1123		}
1124		break;
1125	case PFRW_DYNADDR_UPDATE:
1126	    {
1127		union sockaddr_union	pfr_mask;
1128
1129		if (ke->pfrke_af == AF_INET) {
1130			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1131				break;
1132			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1133			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1134			    AF_INET);
1135			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1136			    AF_INET);
1137		} else if (ke->pfrke_af == AF_INET6) {
1138			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1139				break;
1140			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1141			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1142			    AF_INET6);
1143			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1144			    AF_INET6);
1145		}
1146		break;
1147	    }
1148	}
1149	return (0);
1150}
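
/*
 * Note (explanatory addition): returning nonzero from this callback
 * makes rn_walktree() abort the traversal early; PFRW_POOL_GET relies
 * on that to stop at the requested non-negated entry.
 */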
1151
1152int
1153pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1154{
1155	struct pfr_ktableworkq	 workq;
1156	struct pfr_ktable	*p;
1157	int			 xdel = 0;
1158
1159	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1160	if (pfr_fix_anchor(filter->pfrt_anchor))
1161		return (EINVAL);
1162	if (pfr_table_count(filter, flags) < 0)
1163		return (ENOENT);
1164
1165	SLIST_INIT(&workq);
1166	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1167		if (pfr_skip_table(filter, p, flags))
1168			continue;
1169		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1170			continue;
1171		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1172			continue;
1173		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1174		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1175		xdel++;
1176	}
1177	if (!(flags & PFR_FLAG_DUMMY))
1178		pfr_setflags_ktables(&workq);
1179	if (ndel != NULL)
1180		*ndel = xdel;
1181	return (0);
1182}
1183
1184int
1185pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1186{
1187	struct pfr_ktableworkq	 addq, changeq;
1188	struct pfr_ktable	*p, *q, *r, key;
1189	int			 i, rv, xadd = 0;
1190	long			 tzero = time_second;
1191
1192	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1193	SLIST_INIT(&addq);
1194	SLIST_INIT(&changeq);
1195	for (i = 0; i < size; i++) {
1196		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1197		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1198		    flags & PFR_FLAG_USERIOCTL))
1199			senderr(EINVAL);
1200		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1201		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1202		if (p == NULL) {
1203			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1204			if (p == NULL)
1205				senderr(ENOMEM);
1206			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1207				if (!pfr_ktable_compare(p, q)) {
1208					pfr_destroy_ktable(p, 0);
1209					goto _skip;
1210				}
1211			}
1212			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1213			xadd++;
1214			if (!key.pfrkt_anchor[0])
1215				goto _skip;
1216
1217			/* find or create root table */
1218			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1219			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1220			if (r != NULL) {
1221				p->pfrkt_root = r;
1222				goto _skip;
1223			}
1224			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1225				if (!pfr_ktable_compare(&key, q)) {
1226					p->pfrkt_root = q;
1227					goto _skip;
1228				}
1229			}
1230			key.pfrkt_flags = 0;
1231			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1232			if (r == NULL)
1233				senderr(ENOMEM);
1234			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1235			p->pfrkt_root = r;
1236		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1237			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1238				if (!pfr_ktable_compare(&key, q))
1239					goto _skip;
1240			p->pfrkt_nflags = (p->pfrkt_flags &
1241			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1242			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1243			xadd++;
1244		}
1245_skip:
1246	;
1247	}
1248	if (!(flags & PFR_FLAG_DUMMY)) {
1249		pfr_insert_ktables(&addq);
1250		pfr_setflags_ktables(&changeq);
1251	} else
1252		 pfr_destroy_ktables(&addq, 0);
1253	if (nadd != NULL)
1254		*nadd = xadd;
1255	return (0);
1256_bad:
1257	pfr_destroy_ktables(&addq, 0);
1258	return (rv);
1259}
1260
1261int
1262pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1263{
1264	struct pfr_ktableworkq	 workq;
1265	struct pfr_ktable	*p, *q, key;
1266	int			 i, xdel = 0;
1267
1268	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1269	SLIST_INIT(&workq);
1270	for (i = 0; i < size; i++) {
1271		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1272		if (pfr_validate_table(&key.pfrkt_t, 0,
1273		    flags & PFR_FLAG_USERIOCTL))
1274			return (EINVAL);
1275		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1276		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1277			SLIST_FOREACH(q, &workq, pfrkt_workq)
1278				if (!pfr_ktable_compare(p, q))
1279					goto _skip;
1280			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1281			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1282			xdel++;
1283		}
1284_skip:
1285	;
1286	}
1287
1288	if (!(flags & PFR_FLAG_DUMMY))
1289		pfr_setflags_ktables(&workq);
1290	if (ndel != NULL)
1291		*ndel = xdel;
1292	return (0);
1293}
1294
1295int
1296pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1297	int flags)
1298{
1299	struct pfr_ktable	*p;
1300	int			 n, nn;
1301
1302	PF_RULES_RASSERT();
1303
1304	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1305	if (pfr_fix_anchor(filter->pfrt_anchor))
1306		return (EINVAL);
1307	n = nn = pfr_table_count(filter, flags);
1308	if (n < 0)
1309		return (ENOENT);
1310	if (n > *size) {
1311		*size = n;
1312		return (0);
1313	}
1314	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1315		if (pfr_skip_table(filter, p, flags))
1316			continue;
1317		if (n-- <= 0)
1318			continue;
1319		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1320	}
1321
1322	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1323
1324	*size = nn;
1325	return (0);
1326}
1327
1328int
1329pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1330	int flags)
1331{
1332	struct pfr_ktable	*p;
1333	struct pfr_ktableworkq	 workq;
1334	int			 n, nn;
1335	long			 tzero = time_second;
1336	int			 pfr_dir, pfr_op;
1337
1338	/* XXX PFR_FLAG_CLSTATS disabled */
1339	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1340	if (pfr_fix_anchor(filter->pfrt_anchor))
1341		return (EINVAL);
1342	n = nn = pfr_table_count(filter, flags);
1343	if (n < 0)
1344		return (ENOENT);
1345	if (n > *size) {
1346		*size = n;
1347		return (0);
1348	}
1349	SLIST_INIT(&workq);
1350	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1351		if (pfr_skip_table(filter, p, flags))
1352			continue;
1353		if (n-- <= 0)
1354			continue;
1355		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1356		    sizeof(struct pfr_table));
1357		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1358			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1359				tbl->pfrts_packets[pfr_dir][pfr_op] =
1360				    counter_u64_fetch(
1361					p->pfrkt_packets[pfr_dir][pfr_op]);
1362				tbl->pfrts_bytes[pfr_dir][pfr_op] =
1363				    counter_u64_fetch(
1364					p->pfrkt_bytes[pfr_dir][pfr_op]);
1365			}
1366		}
1367		tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
1368		tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
1369		tbl->pfrts_tzero = p->pfrkt_tzero;
1370		tbl->pfrts_cnt = p->pfrkt_cnt;
1371		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1372			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1373		tbl++;
1374		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1375	}
1376	if (flags & PFR_FLAG_CLSTATS)
1377		pfr_clstats_ktables(&workq, tzero,
1378		    flags & PFR_FLAG_ADDRSTOO);
1379
1380	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1381
1382	*size = nn;
1383	return (0);
1384}
1385
1386int
1387pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1388{
1389	struct pfr_ktableworkq	 workq;
1390	struct pfr_ktable	*p, key;
1391	int			 i, xzero = 0;
1392	long			 tzero = time_second;
1393
1394	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1395	SLIST_INIT(&workq);
1396	for (i = 0; i < size; i++) {
1397		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1398		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1399			return (EINVAL);
1400		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1401		if (p != NULL) {
1402			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1403			xzero++;
1404		}
1405	}
1406	if (!(flags & PFR_FLAG_DUMMY))
1407		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1408	if (nzero != NULL)
1409		*nzero = xzero;
1410	return (0);
1411}
1412
1413int
1414pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1415	int *nchange, int *ndel, int flags)
1416{
1417	struct pfr_ktableworkq	 workq;
1418	struct pfr_ktable	*p, *q, key;
1419	int			 i, xchange = 0, xdel = 0;
1420
1421	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1422	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1423	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1424	    (setflag & clrflag))
1425		return (EINVAL);
1426	SLIST_INIT(&workq);
1427	for (i = 0; i < size; i++) {
1428		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1429		if (pfr_validate_table(&key.pfrkt_t, 0,
1430		    flags & PFR_FLAG_USERIOCTL))
1431			return (EINVAL);
1432		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1433		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1434			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1435			    ~clrflag;
1436			if (p->pfrkt_nflags == p->pfrkt_flags)
1437				goto _skip;
1438			SLIST_FOREACH(q, &workq, pfrkt_workq)
1439				if (!pfr_ktable_compare(p, q))
1440					goto _skip;
1441			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1442			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1443			    (clrflag & PFR_TFLAG_PERSIST) &&
1444			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1445				xdel++;
1446			else
1447				xchange++;
1448		}
1449_skip:
1450	;
1451	}
1452	if (!(flags & PFR_FLAG_DUMMY))
1453		pfr_setflags_ktables(&workq);
1454	if (nchange != NULL)
1455		*nchange = xchange;
1456	if (ndel != NULL)
1457		*ndel = xdel;
1458	return (0);
1459}
1460
1461int
1462pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1463{
1464	struct pfr_ktableworkq	 workq;
1465	struct pfr_ktable	*p;
1466	struct pf_ruleset	*rs;
1467	int			 xdel = 0;
1468
1469	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1470	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1471	if (rs == NULL)
1472		return (ENOMEM);
1473	SLIST_INIT(&workq);
1474	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1475		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1476		    pfr_skip_table(trs, p, 0))
1477			continue;
1478		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1479		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1480		xdel++;
1481	}
1482	if (!(flags & PFR_FLAG_DUMMY)) {
1483		pfr_setflags_ktables(&workq);
1484		if (ticket != NULL)
1485			*ticket = ++rs->tticket;
1486		rs->topen = 1;
1487	} else
1488		pf_remove_if_empty_ruleset(rs);
1489	if (ndel != NULL)
1490		*ndel = xdel;
1491	return (0);
1492}
1493
1494int
1495pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1496    int *nadd, int *naddr, u_int32_t ticket, int flags)
1497{
1498	struct pfr_ktableworkq	 tableq;
1499	struct pfr_kentryworkq	 addrq;
1500	struct pfr_ktable	*kt, *rt, *shadow, key;
1501	struct pfr_kentry	*p;
1502	struct pfr_addr		*ad;
1503	struct pf_ruleset	*rs;
1504	int			 i, rv, xadd = 0, xaddr = 0;
1505
1506	PF_RULES_WASSERT();
1507
1508	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1509	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1510		return (EINVAL);
1511	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1512	    flags & PFR_FLAG_USERIOCTL))
1513		return (EINVAL);
1514	rs = pf_find_ruleset(tbl->pfrt_anchor);
1515	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1516		return (EBUSY);
1517	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1518	SLIST_INIT(&tableq);
1519	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1520	if (kt == NULL) {
1521		kt = pfr_create_ktable(tbl, 0, 1);
1522		if (kt == NULL)
1523			return (ENOMEM);
1524		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1525		xadd++;
1526		if (!tbl->pfrt_anchor[0])
1527			goto _skip;
1528
1529		/* find or create root table */
1530		bzero(&key, sizeof(key));
1531		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1532		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1533		if (rt != NULL) {
1534			kt->pfrkt_root = rt;
1535			goto _skip;
1536		}
1537		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1538		if (rt == NULL) {
1539			pfr_destroy_ktables(&tableq, 0);
1540			return (ENOMEM);
1541		}
1542		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1543		kt->pfrkt_root = rt;
1544	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1545		xadd++;
1546_skip:
1547	shadow = pfr_create_ktable(tbl, 0, 0);
1548	if (shadow == NULL) {
1549		pfr_destroy_ktables(&tableq, 0);
1550		return (ENOMEM);
1551	}
1552	SLIST_INIT(&addrq);
1553	for (i = 0, ad = addr; i < size; i++, ad++) {
1554		if (pfr_validate_addr(ad))
1555			senderr(EINVAL);
1556		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1557			continue;
1558		p = pfr_create_kentry(ad,
1559		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1560		if (p == NULL)
1561			senderr(ENOMEM);
1562		if (pfr_route_kentry(shadow, p)) {
1563			pfr_destroy_kentry(p);
1564			continue;
1565		}
1566		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1567		xaddr++;
1568	}
1569	if (!(flags & PFR_FLAG_DUMMY)) {
1570		if (kt->pfrkt_shadow != NULL)
1571			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1572		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1573		pfr_insert_ktables(&tableq);
1574		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1575		    xaddr : NO_ADDRESSES;
1576		kt->pfrkt_shadow = shadow;
1577	} else {
1578		pfr_clean_node_mask(shadow, &addrq);
1579		pfr_destroy_ktable(shadow, 0);
1580		pfr_destroy_ktables(&tableq, 0);
1581		pfr_destroy_kentries(&addrq);
1582	}
1583	if (nadd != NULL)
1584		*nadd = xadd;
1585	if (naddr != NULL)
1586		*naddr = xaddr;
1587	return (0);
1588_bad:
1589	pfr_destroy_ktable(shadow, 0);
1590	pfr_destroy_ktables(&tableq, 0);
1591	pfr_destroy_kentries(&addrq);
1592	return (rv);
1593}
1594
1595int
1596pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1597{
1598	struct pfr_ktableworkq	 workq;
1599	struct pfr_ktable	*p;
1600	struct pf_ruleset	*rs;
1601	int			 xdel = 0;
1602
1603	PF_RULES_WASSERT();
1604
1605	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1606	rs = pf_find_ruleset(trs->pfrt_anchor);
1607	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1608		return (0);
1609	SLIST_INIT(&workq);
1610	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1611		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1612		    pfr_skip_table(trs, p, 0))
1613			continue;
1614		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1615		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1616		xdel++;
1617	}
1618	if (!(flags & PFR_FLAG_DUMMY)) {
1619		pfr_setflags_ktables(&workq);
1620		rs->topen = 0;
1621		pf_remove_if_empty_ruleset(rs);
1622	}
1623	if (ndel != NULL)
1624		*ndel = xdel;
1625	return (0);
1626}
1627
1628int
1629pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1630    int *nchange, int flags)
1631{
1632	struct pfr_ktable	*p, *q;
1633	struct pfr_ktableworkq	 workq;
1634	struct pf_ruleset	*rs;
1635	int			 xadd = 0, xchange = 0;
1636	long			 tzero = time_second;
1637
1638	PF_RULES_WASSERT();
1639
1640	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1641	rs = pf_find_ruleset(trs->pfrt_anchor);
1642	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1643		return (EBUSY);
1644
1645	SLIST_INIT(&workq);
1646	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1647		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1648		    pfr_skip_table(trs, p, 0))
1649			continue;
1650		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1651		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1652			xchange++;
1653		else
1654			xadd++;
1655	}
1656
1657	if (!(flags & PFR_FLAG_DUMMY)) {
1658		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1659			q = SLIST_NEXT(p, pfrkt_workq);
1660			pfr_commit_ktable(p, tzero);
1661		}
1662		rs->topen = 0;
1663		pf_remove_if_empty_ruleset(rs);
1664	}
1665	if (nadd != NULL)
1666		*nadd = xadd;
1667	if (nchange != NULL)
1668		*nchange = xchange;
1669
1670	return (0);
1671}
1672
1673static void
1674pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1675{
1676	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1677	int			 nflags;
1678
1679	PF_RULES_WASSERT();
1680
1681	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1682		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1683			pfr_clstats_ktable(kt, tzero, 1);
1684	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1685		/* kt might contain addresses */
1686		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1687		struct pfr_kentry	*p, *q, *next;
1688		struct pfr_addr		 ad;
1689
1690		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1691		pfr_mark_addrs(kt);
1692		SLIST_INIT(&addq);
1693		SLIST_INIT(&changeq);
1694		SLIST_INIT(&delq);
1695		SLIST_INIT(&garbageq);
1696		pfr_clean_node_mask(shadow, &addrq);
1697		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1698			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
1699			pfr_copyout_addr(&ad, p);
1700			q = pfr_lookup_addr(kt, &ad, 1);
1701			if (q != NULL) {
1702				if (q->pfrke_not != p->pfrke_not)
1703					SLIST_INSERT_HEAD(&changeq, q,
1704					    pfrke_workq);
1705				q->pfrke_mark = 1;
1706				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1707			} else {
1708				p->pfrke_counters.pfrkc_tzero = tzero;
1709				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1710			}
1711		}
1712		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1713		pfr_insert_kentries(kt, &addq, tzero);
1714		pfr_remove_kentries(kt, &delq);
1715		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1716		pfr_destroy_kentries(&garbageq);
1717	} else {
1718		/* kt cannot contain addresses */
1719		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1720		    shadow->pfrkt_ip4);
1721		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1722		    shadow->pfrkt_ip6);
1723		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1724		pfr_clstats_ktable(kt, tzero, 1);
1725	}
1726	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1727	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1728		& ~PFR_TFLAG_INACTIVE;
1729	pfr_destroy_ktable(shadow, 0);
1730	kt->pfrkt_shadow = NULL;
1731	pfr_setflags_ktable(kt, nflags);
1732}
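
/*
 * Note (explanatory addition): the three commit strategies above are
 * (1) the shadow carried no addresses, so only statistics are reset;
 * (2) the table is live, so add/delete/change sets are computed entry
 * by entry; (3) the table is idle, so the radix heads are simply
 * swapped with the shadow's, which is O(1) regardless of table size.
 */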
1733
1734static int
1735pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1736{
1737	int i;
1738
1739	if (!tbl->pfrt_name[0])
1740		return (-1);
1741	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1742		 return (-1);
1743	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1744		return (-1);
1745	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1746		if (tbl->pfrt_name[i])
1747			return (-1);
1748	if (pfr_fix_anchor(tbl->pfrt_anchor))
1749		return (-1);
1750	if (tbl->pfrt_flags & ~allowedflags)
1751		return (-1);
1752	return (0);
1753}
1754
1755/*
1756 * Rewrite anchors referenced by tables to remove slashes
1757 * and check for validity.
1758 */
1759static int
1760pfr_fix_anchor(char *anchor)
1761{
1762	size_t siz = MAXPATHLEN;
1763	int i;
1764
1765	if (anchor[0] == '/') {
1766		char *path;
1767		int off;
1768
1769		path = anchor;
1770		off = 1;
1771		while (*++path == '/')
1772			off++;
1773		bcopy(path, anchor, siz - off);
1774		memset(anchor + siz - off, 0, off);
1775	}
1776	if (anchor[siz - 1])
1777		return (-1);
1778	for (i = strlen(anchor); i < siz; i++)
1779		if (anchor[i])
1780			return (-1);
1781	return (0);
1782}
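
/*
 * Example (illustrative addition): the anchor "/foo/bar" is rewritten
 * in place to "foo/bar" and accepted, while an anchor that is not
 * NUL-terminated within MAXPATHLEN bytes is rejected with -1.
 */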
1783
1784int
1785pfr_table_count(struct pfr_table *filter, int flags)
1786{
1787	struct pf_ruleset *rs;
1788
1789	PF_RULES_ASSERT();
1790
1791	if (flags & PFR_FLAG_ALLRSETS)
1792		return (pfr_ktable_cnt);
1793	if (filter->pfrt_anchor[0]) {
1794		rs = pf_find_ruleset(filter->pfrt_anchor);
1795		return ((rs != NULL) ? rs->tables : -1);
1796	}
1797	return (pf_main_ruleset.tables);
1798}
1799
1800static int
1801pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1802{
1803	if (flags & PFR_FLAG_ALLRSETS)
1804		return (0);
1805	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1806		return (1);
1807	return (0);
1808}
1809
1810static void
1811pfr_insert_ktables(struct pfr_ktableworkq *workq)
1812{
1813	struct pfr_ktable	*p;
1814
1815	SLIST_FOREACH(p, workq, pfrkt_workq)
1816		pfr_insert_ktable(p);
1817}
1818
1819static void
1820pfr_insert_ktable(struct pfr_ktable *kt)
1821{
1822
1823	PF_RULES_WASSERT();
1824
1825	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1826	pfr_ktable_cnt++;
1827	if (kt->pfrkt_root != NULL)
1828		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1829			pfr_setflags_ktable(kt->pfrkt_root,
1830			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1831}
1832
1833static void
1834pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1835{
1836	struct pfr_ktable	*p, *q;
1837
1838	for (p = SLIST_FIRST(workq); p; p = q) {
1839		q = SLIST_NEXT(p, pfrkt_workq);
1840		pfr_setflags_ktable(p, p->pfrkt_nflags);
1841	}
1842}
1843
1844static void
1845pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1846{
1847	struct pfr_kentryworkq	addrq;
1848
1849	PF_RULES_WASSERT();
1850
1851	if (!(newf & PFR_TFLAG_REFERENCED) &&
1852	    !(newf & PFR_TFLAG_REFDANCHOR) &&
1853	    !(newf & PFR_TFLAG_PERSIST))
1854		newf &= ~PFR_TFLAG_ACTIVE;
1855	if (!(newf & PFR_TFLAG_ACTIVE))
1856		newf &= ~PFR_TFLAG_USRMASK;
1857	if (!(newf & PFR_TFLAG_SETMASK)) {
1858		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1859		if (kt->pfrkt_root != NULL)
1860			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1861				pfr_setflags_ktable(kt->pfrkt_root,
1862				    kt->pfrkt_root->pfrkt_flags &
1863					~PFR_TFLAG_REFDANCHOR);
1864		pfr_destroy_ktable(kt, 1);
1865		pfr_ktable_cnt--;
1866		return;
1867	}
1868	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1869		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1870		pfr_remove_kentries(kt, &addrq);
1871	}
1872	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1873		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1874		kt->pfrkt_shadow = NULL;
1875	}
1876	kt->pfrkt_flags = newf;
1877}
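
/*
 * Note (explanatory addition, a reading of the code above): clearing
 * the last of REFERENCED/REFDANCHOR/PERSIST drops ACTIVE; once neither
 * ACTIVE nor INACTIVE remains (PFR_TFLAG_SETMASK), the table is
 * unlinked from pfr_ktables and destroyed, releasing its anchor root's
 * reference in turn.
 */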
1878
1879static void
1880pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1881{
1882	struct pfr_ktable	*p;
1883
1884	SLIST_FOREACH(p, workq, pfrkt_workq)
1885		pfr_clstats_ktable(p, tzero, recurse);
1886}
1887
1888static void
1889pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1890{
1891	struct pfr_kentryworkq	 addrq;
1892	int			 pfr_dir, pfr_op;
1893
1894	if (recurse) {
1895		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1896		pfr_clstats_kentries(kt, &addrq, tzero, 0);
1897	}
1898	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1899		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1900			counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
1901			counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
1902		}
1903	}
1904	counter_u64_zero(kt->pfrkt_match);
1905	counter_u64_zero(kt->pfrkt_nomatch);
1906	kt->pfrkt_tzero = tzero;
1907}
1908
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	int			 pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (rs == NULL) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			kt->pfrkt_packets[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (kt->pfrkt_packets[pfr_dir][pfr_op] == NULL) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			kt->pfrkt_bytes[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (kt->pfrkt_bytes[pfr_dir][pfr_op] == NULL) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
	if (kt->pfrkt_match == NULL) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
	if (kt->pfrkt_nomatch == NULL) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

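/*
 * Release everything a table owns: optionally its addresses, then the
 * radix heads, an attached shadow table, the ruleset reference and the
 * statistics counters, and finally the table itself.
 */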
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;
	int			 pfr_dir, pfr_op;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	counter_u64_free(kt->pfrkt_match);
	counter_u64_free(kt->pfrkt_nomatch);

	free(kt, M_PFTABLE);
}

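/*
 * Ordering for the table RB tree: by table name first, by anchor path
 * second.  pfr_lookup_table() relies on this when it searches with a
 * pfr_table cast to a pfr_ktable.
 */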
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

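/*
 * Report whether an address matches a table: look up the best-matching
 * entry and check that it is not negated.  An inactive table defers to
 * its root (anchor) table, and every lookup increments either the match
 * or the nomatch counter.
 */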
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		counter_u64_add(kt->pfrkt_match, 1);
	else
		counter_u64_add(kt->pfrkt_nomatch, 1);
	return (match);
}

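/*
 * Account a packet against a table.  The packet and byte counters are
 * selected by direction and pass/block outcome; if the lookup result
 * disagrees with what the rule expected (notrule), the statistics are
 * diverted to PFR_OP_XPASS.  Per-entry counters are updated as well
 * when the table carries PFR_TFLAG_COUNTERS.
 */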
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(ke->pfrke_counters.
		    pfrkc_packets[dir_out][op_pass], 1);
		counter_u64_add(ke->pfrke_counters.
		    pfrkc_bytes[dir_out][op_pass], len);
	}
}

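/*
 * Find or create the table a rule refers to and acquire a rule
 * reference on it.  For a table inside an anchor, a root table of the
 * same name is looked up or created in the main ruleset and linked via
 * pfrkt_root, so lookups can fall back to it while the anchor's own
 * table is inactive.
 */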
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

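/*
 * Drop a rule reference on a table; the last reference clears
 * PFR_TFLAG_REFERENCED, which may deactivate or destroy the table.
 */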
void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

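/*
 * Pick the next address from a table for a round-robin address pool.
 * *pidx is the index of the current block (network entry) and *counter
 * the address handed out last; on success both are updated and the
 * chosen address is copied into *counter.  Nested (more specific)
 * blocks are skipped by advancing past them.  Returns 0 on success,
 * 1 when the table holds no further address of this family, and -1
 * when the table is not active.
 *
 * Typical consumer (sketch only; pf_map_addr() in pf_lb.c drives it
 * roughly like this for PF_POOL_ROUNDROBIN):
 *
 *	if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter, af)) {
 *		rpool->tblidx = -1;	 wrap and retry from first block
 *		if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter, af))
 *			return (1);	 no usable address
 *	}
 */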
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		 *addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		counter_u64_add(kt->pfrkt_nomatch, 1);
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		counter_u64_add(kt->pfrkt_match, 1);
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			counter_u64_add(kt->pfrkt_match, 1);
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

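/*
 * Return the idx-th entry (in radix walk order) of the given address
 * family, or NULL if the table has fewer entries; this is how
 * pfr_pool_get() steps from block to block.
 */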
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

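/*
 * Refresh a dynamic address binding: walk the IPv4 and IPv6 radix
 * heads, recounting the entries of each family and caching the first
 * address and mask of each in the pfi_dynaddr.
 */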
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}