/*	$OpenBSD: pf_table.c,v 1.41 2003/08/22 15:19:23 henning Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
	}	 pfrw_1;
	int	 pfrw_free;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union  sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

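/*
 * The pfr_*_addrs() and pfr_*_tables() functions below back pf's
 * table ioctls (DIOCR*).  They share a common flag convention:
 * PFR_FLAG_DUMMY computes and reports the result without committing
 * anything, PFR_FLAG_FEEDBACK copies a per-address pfra_fback code
 * back to userland, and PFR_FLAG_ATOMIC wraps the commit phase in
 * splsoftnet() so the packet path never sees a half-updated table.
 */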
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (copyout(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && *size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (copyout(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

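/*
 * Sanity-check an address from userland: the prefix length must fit
 * the address family, all host bits beyond the prefix (and any unused
 * bytes of the address union, which sits at the start of the struct)
 * must be zero, and the feedback field must be clean.  For example,
 * 10.0.0.1/24 is rejected because address bits below the /24 mask
 * are set.
 */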
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

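/*
 * Look up an address in a table's radix tree.  Network entries are
 * found with an exact rn_lookup() on address and prepared mask; host
 * entries use rn_match(), optionally rejecting a covering network
 * entry when an exact host match is required.  The radix root nodes
 * (RNF_ROOT) are never valid entries and are filtered out.
 */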
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (copyout(&ad, addr+i, sizeof(ad)))
			break;
	}
}

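/*
 * Build a sockaddr-style netmask with the top 'net' bits set, e.g.
 * (AF_INET, 20) yields 255.255.240.0.  For IPv6 the mask is filled
 * 32 bits at a time until fewer than 33 bits remain.
 */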
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    htonl(-1 << (32-net));
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (copyout(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

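/*
 * Inactive-set transaction used when loading table definitions from a
 * ruleset: pfr_ina_begin() clears any leftover inactive tables and
 * hands out a ticket, pfr_ina_define() builds shadow tables under
 * that ticket, and pfr_ina_commit() atomically swaps or merges the
 * shadows into the active tables.
 */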
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

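/*
 * Merge one shadow table into its active counterpart.  Three cases:
 * the shadow carries no address list (NO_ADDRESSES), so only table
 * flags and stats are touched; the target is active and may already
 * hold addresses, so the two address sets are diffed into add/change/
 * delete queues; or the target is empty, so the radix trees are
 * simply swapped.  The shadow is destroyed afterwards.
 */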
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;
	struct pf_anchor *ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

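/*
 * Apply a new flag set to a table and perform the side effects of the
 * transition: a table with no SETMASK flag left (no reason to exist)
 * is removed from the tree and destroyed, a table that goes inactive
 * gets its addresses flushed, and a table that leaves the INACTIVE
 * state drops its shadow.
 */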
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

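/*
 * Packet-path lookups: pfr_match_addr() tests an address against a
 * table (entries flagged pfrke_not never produce a match) and keeps
 * the table's match/nomatch counters; pfr_update_stats() additionally
 * charges packet and byte counters to the table and the matching
 * entry.  Both fall back to the anchor's root table when the table
 * itself is not active.
 */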
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

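/*
 * Round-robin address selection for address pools backed by a table.
 * *pidx indexes the table's blocks (as visited by PFRW_POOL_GET) and
 * counter remembers the last address handed out; the loop below skips
 * blocks that are nested inside the current block by advancing the
 * address past the nested block, moving on to the next block when the
 * current one is exhausted.
 */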
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match(&pfr_sin, kt->pfrkt_ip4) :
		    rn_match(&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}