/*	$NetBSD: pf_table.c,v 1.19 2020/12/04 00:41:10 thorpej Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_table.c,v 1.19 2020/12/04 00:41:10 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifndef __NetBSD__
#include <netinet/ip_ipsp.h>
#endif /* !__NetBSD__ */
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

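/*
 * COPYIN/COPYOUT move data across the user/kernel boundary when the
 * request originates from a user ioctl (PFR_FLAG_USERIOCTL); otherwise
 * both sides live in kernel space and a plain bcopy suffices.
 */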
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

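/*
 * Set up the table/entry pools and the preformatted IPv4/IPv6
 * sockaddrs used by the lookup routines.
 */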
void
pfr_initialize(void)
{
#ifdef __NetBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_nointr, IPL_NET);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_nointr, IPL_NET);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL, IPL_SOFTNET);
#else
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);
#endif /* !__NetBSD__ */

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _MODULE
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif /* _MODULE */

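/*
 * Flush all addresses from a table.  *ndel is set to the number of
 * addresses removed; with PFR_FLAG_DUMMY the table is left untouched.
 */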
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

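/*
 * Add 'size' addresses to a table.  A temporary table is used to
 * detect duplicates within the request itself; with PFR_FLAG_FEEDBACK
 * a per-address result code is copied back to the caller.
 */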
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

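/*
 * Delete 'size' addresses from a table, reporting per-address results
 * when PFR_FLAG_FEEDBACK is set.
 */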
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
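	/*
	 * Illustrative example: with N = 1024 entries in the table,
	 * the loop below leaves log at 12, so the O(N) full scan is
	 * taken once more than 1024/12 (i.e. 85) deletions are
	 * requested; below that, per-address lookups win.
	 */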
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

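/*
 * Replace the contents of a table with the given address list:
 * addresses not in the list are deleted, new ones are added, and
 * entries whose negation flag differs are changed.  On return,
 * *size2 reflects the room needed to report the deletions.
 */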
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	/* size2 may be NULL; guard before dereferencing, as below */
	if ((flags & PFR_FLAG_FEEDBACK) && size2 != NULL && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

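/*
 * Test which of the given (host) addresses match the table; network
 * prefixes are rejected.  *nmatch counts the positive matches.
 */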
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

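/*
 * Copy the table's addresses out to the caller.  If the supplied
 * buffer is too small, only the required size is reported.
 */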
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

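/*
 * Copy out the table's addresses together with their counters;
 * clearing stats on read (PFR_FLAG_CLSTATS) is currently disabled
 * here.
 */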
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

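/*
 * Zero the counters of the given addresses; *nzero counts how many
 * entries were found and cleared.
 */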
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

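/*
 * Sanity-check a pfr_addr: the prefix length must fit the address
 * family, no address bits may be set past the prefix, and the spare
 * fields must be clean.
 */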
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((char *)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

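/*
 * Look up an address in the table's radix tree.  Network prefixes use
 * an exact rn_lookup with a prepared mask; host addresses use
 * rn_match, optionally rejecting network entries when 'exact' is set.
 */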
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

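/*
 * Build a netmask sockaddr for the given family and prefix length;
 * e.g. (AF_INET, 24) yields 255.255.255.0.
 */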
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(~0U << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(~0U << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

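/*
 * Insert a kentry into the table's radix tree; returns -1 if the
 * radix code refuses the route (e.g. a duplicate key).
 */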
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

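/*
 * Radix-tree walker callback; the operation performed on each entry
 * is selected by pfrw_op (marking, sweeping, enqueueing, copying
 * addresses or stats out, pool address selection, dynaddr update).
 */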
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

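/*
 * Deactivate all tables matching the filter (except those in the
 * reserved anchor); the actual destruction happens via
 * pfr_setflags_ktables().
 */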
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

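/*
 * Create the given tables (and, for anchored tables, their root
 * tables) or reactivate existing inactive ones.
 */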
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

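/*
 * Copy out the definitions of all tables matching the filter,
 * reporting the required size if the buffer is too small.
 */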
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

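/*
 * Set/clear user-settable flags on the given tables.  Clearing
 * PERSIST on an unreferenced table deletes it, which is counted in
 * *ndel rather than *nchange.
 */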
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

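/*
 * Start a table transaction for the given ruleset: wipe any inactive
 * tables left over from a previous transaction and hand out a ticket.
 */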
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

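/*
 * Define a table (and optionally its addresses) inside an open
 * transaction.  The addresses are staged in a shadow table that is
 * swapped in by pfr_ina_commit().
 */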
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

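/*
 * Abort a transaction: clear the INACTIVE flag on all staged tables,
 * destroying their shadows, and close the ruleset if it became empty.
 */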
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

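/*
 * Fold a shadow table into its active counterpart: either merge the
 * address lists entry by entry or, when the active table is empty,
 * simply swap the radix trees.
 */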
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		memmove(anchor, path, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

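/*
 * Apply a new flag set to a table.  Dropping the last SETMASK flag
 * destroys the table; dropping ACTIVE flushes its addresses; dropping
 * INACTIVE discards its shadow.
 */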
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

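/*
 * Allocate a table, optionally attach it to its ruleset, and
 * initialize its IPv4/IPv6 radix trees.
 */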
1914struct pfr_ktable *
1915pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1916{
1917	struct pfr_ktable	*kt;
1918	struct pf_ruleset	*rs;
1919	void			*h4 = NULL, *h6 = NULL;
1920
1921	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
1922	if (kt == NULL)
1923		return (NULL);
1924	bzero(kt, sizeof(*kt));
1925	kt->pfrkt_t = *tbl;
1926
1927	if (attachruleset) {
1928		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1929		if (!rs) {
1930			pfr_destroy_ktable(kt, 0);
1931			return (NULL);
1932		}
1933		kt->pfrkt_rs = rs;
1934		rs->tables++;
1935	}
1936
1937	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
1938		goto out;
1939
1940	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1941		Free(h4);
1942		goto out;
1943	}
1944	kt->pfrkt_ip4 = h4;
1945	kt->pfrkt_ip6 = h6;
1946	kt->pfrkt_tzero = tzero;
1947
1948	return (kt);
1949out:
1950	pfr_destroy_ktable(kt, 0);
1951	return (NULL);
1952}
1953
1954void
1955pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1956{
1957	struct pfr_ktable	*p, *q;
1958
1959	for (p = SLIST_FIRST(workq); p; p = q) {
1960		q = SLIST_NEXT(p, pfrkt_workq);
1961		pfr_destroy_ktable(p, flushaddr);
1962	}
1963}
1964
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((void *)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((void *)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

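/*
 * RB-tree comparison for the ktable tree: order by table name first,
 * then by anchor path.  Only the embedded struct pfr_table is examined,
 * which is what lets pfr_lookup_table() search with a bare descriptor.
 */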
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts with a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

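/*
 * Fast path used at packet evaluation time: report whether address a
 * is covered by table kt for family af.  An inactive table defers to
 * its root (anchor) table, and a hit on a negated ("not") entry counts
 * as a non-match.  The match/nomatch counters are updated as a side
 * effect.  The lookups go through the static pfr_sin/pfr_sin6 scratch
 * sockaddrs, which assumes callers are serialized by spl/kernel lock.
 */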
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

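/*
 * Account one packet of len bytes against the table and, when one
 * matched, against the individual entry.  If the lookup disagrees with
 * the result recorded at rule evaluation time (notrule), the traffic
 * is booked under PFR_OP_XPASS instead of a per-entry counter.
 */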
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

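/*
 * Find or create the table `name' for use by a rule in ruleset rs and
 * take a rule reference on it.  Inside an anchor, a root table of the
 * same name is found or created at the global level and linked through
 * pfrkt_root.  The first rule reference marks the table REFERENCED.
 */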
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

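/*
 * Drop a rule reference; releasing the last one clears REFERENCED,
 * which may deactivate or destroy the table via pfr_setflags_ktable().
 */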
void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

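/*
 * Round-robin address selection for pools backed by a table.  *pidx
 * names the current block (the idx'th entry of the table) and counter
 * the current address within it; nested, more specific blocks are
 * skipped by advancing the counter past them.  Returns 0 on success
 * with *raddr/*rmask/counter updated, 1 once idx runs past the last
 * entry (the caller wraps around), or -1 if the table is not active.
 */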
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		/* XXX was AF_INET; an AF_INET mask is wrong for af == AF_INET6 */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

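/*
 * Return the idx'th entry of the table for the given address family,
 * found by walking the radix tree with a PFRW_POOL_GET walker.
 */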
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

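/*
 * Refresh a dynamic address (as used by pf's interface-backed dynamic
 * address tables) from the table's contents: reset the per-family
 * address counts, then re-walk the radix trees so the
 * PFRW_DYNADDR_UPDATE walker can recompute the cached addresses and
 * counts, all at splsoftnet.
 */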
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}
