pf_table.c, r236364 (old) vs. r240233 (new)
1/* $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $ */
2
3/*
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 16 unchanged lines hidden ---

25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33#ifdef __FreeBSD__
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_table.c 240233 2012-09-08 06:41:54Z glebius $");
35
34#include "opt_inet.h"
35#include "opt_inet6.h"
36
36#include "opt_inet.h"
37#include "opt_inet6.h"
38
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_table.c 236364 2012-05-31 20:10:05Z eri $");
39#endif
40
41#include <sys/param.h>
39#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/socket.h>
44#include <sys/mbuf.h>
45#include <sys/kernel.h>
40#include <sys/kernel.h>
46#ifdef __FreeBSD__
41#include <sys/lock.h>
47#include <sys/malloc.h>
42#include <sys/malloc.h>
48#else
49#include <sys/pool.h>
50#endif
43#include <sys/mutex.h>
44#include <sys/refcount.h>
45#include <sys/rwlock.h>
46#include <sys/socket.h>
47#include <vm/uma.h>
51
52#include <net/if.h>
48
49#include <net/if.h>
53#include <net/route.h>
54#include <netinet/in.h>
55#ifndef __FreeBSD__
56#include <netinet/ip_ipsp.h>
57#endif
50#include <net/vnet.h>
58#include <net/pfvar.h>
59
60#define ACCEPT_FLAGS(flags, oklist) \
61 do { \
62 if ((flags & ~(oklist)) & \
63 PFR_FLAG_ALLMASK) \
64 return (EINVAL); \
65 } while (0)
66
51#include <net/pfvar.h>
52
53#define ACCEPT_FLAGS(flags, oklist) \
54 do { \
55 if ((flags & ~(oklist)) & \
56 PFR_FLAG_ALLMASK) \
57 return (EINVAL); \
58 } while (0)
59
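/*
 * [Editorial note, not part of either revision] ACCEPT_FLAGS() makes the
 * *calling* function return EINVAL when the request carries any
 * PFR_FLAG_* bit outside the allowed set; bits outside PFR_FLAG_ALLMASK
 * (such as PFR_FLAG_USERIOCTL) are deliberately ignored.  For example,
 * ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK) rejects a
 * request that also sets PFR_FLAG_REPLACE before any work is done.
 */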
67#ifdef __FreeBSD__
68static inline int
69_copyin(const void *uaddr, void *kaddr, size_t len)
70{
71 int r;
72
73 PF_UNLOCK();
74 r = copyin(uaddr, kaddr, len);
75 PF_LOCK();
76
77 return (r);
78}
79
80static inline int
81_copyout(const void *uaddr, void *kaddr, size_t len)
82{
83 int r;
84
85 PF_UNLOCK();
86 r = copyout(uaddr, kaddr, len);
87 PF_LOCK();
88
89 return (r);
90}
91
92#define COPYIN(from, to, size, flags) \
93 ((flags & PFR_FLAG_USERIOCTL) ? \
94 _copyin((from), (to), (size)) : \
95 (bcopy((from), (to), (size)), 0))
96
97#define COPYOUT(from, to, size, flags) \
98 ((flags & PFR_FLAG_USERIOCTL) ? \
99 _copyout((from), (to), (size)) : \
100 (bcopy((from), (to), (size)), 0))
101
102#else
103#define COPYIN(from, to, size, flags) \
104 ((flags & PFR_FLAG_USERIOCTL) ? \
105 copyin((from), (to), (size)) : \
106 (bcopy((from), (to), (size)), 0))
107
108#define COPYOUT(from, to, size, flags) \
109 ((flags & PFR_FLAG_USERIOCTL) ? \
110 copyout((from), (to), (size)) : \
111 (bcopy((from), (to), (size)), 0))
112#endif
113
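/*
 * [Editorial note, not part of either revision] The _copyin()/_copyout()
 * wrappers above exist because copyin()/copyout() may fault and sleep,
 * which is not allowed while the pf mutex is held, so the lock is
 * dropped around the transfer for user ioctls; callers inside the
 * kernel (PFR_FLAG_USERIOCTL not set) pass kernel pointers, and
 * COPYIN()/COPYOUT() fall back to a plain bcopy() for them.
 */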
114#define FILLIN_SIN(sin, addr) \
115 do { \
116 (sin).sin_len = sizeof(sin); \
117 (sin).sin_family = AF_INET; \
118 (sin).sin_addr = (addr); \
119 } while (0)
120
121#define FILLIN_SIN6(sin6, addr) \

--- 37 unchanged lines hidden ---

159 union {
160 struct pfr_addr *pfrw1_addr;
161 struct pfr_astats *pfrw1_astats;
162 struct pfr_kentryworkq *pfrw1_workq;
163 struct pfr_kentry *pfrw1_kentry;
164 struct pfi_dynaddr *pfrw1_dyn;
165 } pfrw_1;
166 int pfrw_free;
60#define FILLIN_SIN(sin, addr) \
61 do { \
62 (sin).sin_len = sizeof(sin); \
63 (sin).sin_family = AF_INET; \
64 (sin).sin_addr = (addr); \
65 } while (0)
66
67#define FILLIN_SIN6(sin6, addr) \

--- 37 unchanged lines hidden (view full) ---

105 union {
106 struct pfr_addr *pfrw1_addr;
107 struct pfr_astats *pfrw1_astats;
108 struct pfr_kentryworkq *pfrw1_workq;
109 struct pfr_kentry *pfrw1_kentry;
110 struct pfi_dynaddr *pfrw1_dyn;
111 } pfrw_1;
112 int pfrw_free;
167 int pfrw_flags;
168};
169#define pfrw_addr pfrw_1.pfrw1_addr
170#define pfrw_astats pfrw_1.pfrw1_astats
171#define pfrw_workq pfrw_1.pfrw1_workq
172#define pfrw_kentry pfrw_1.pfrw1_kentry
173#define pfrw_dyn pfrw_1.pfrw1_dyn
174#define pfrw_cnt pfrw_free
175
176#define senderr(e) do { rv = (e); goto _bad; } while (0)
177
113};
114#define pfrw_addr pfrw_1.pfrw1_addr
115#define pfrw_astats pfrw_1.pfrw1_astats
116#define pfrw_workq pfrw_1.pfrw1_workq
117#define pfrw_kentry pfrw_1.pfrw1_kentry
118#define pfrw_dyn pfrw_1.pfrw1_dyn
119#define pfrw_cnt pfrw_free
120
121#define senderr(e) do { rv = (e); goto _bad; } while (0)
122
178#ifdef __FreeBSD__
179VNET_DEFINE(uma_zone_t, pfr_ktable_pl);
180VNET_DEFINE(uma_zone_t, pfr_kentry_pl);
181VNET_DEFINE(uma_zone_t, pfr_kcounters_pl);
182VNET_DEFINE(struct sockaddr_in, pfr_sin);
183#define V_pfr_sin VNET(pfr_sin)
184VNET_DEFINE(struct sockaddr_in6, pfr_sin6);
185#define V_pfr_sin6 VNET(pfr_sin6)
186VNET_DEFINE(union sockaddr_union, pfr_mask);
187#define V_pfr_mask VNET(pfr_mask)
188VNET_DEFINE(struct pf_addr, pfr_ffaddr);
189#define V_pfr_ffaddr VNET(pfr_ffaddr)
190#else
191struct pool pfr_ktable_pl;
192struct pool pfr_kentry_pl;
193struct pool pfr_kcounters_pl;
194struct sockaddr_in pfr_sin;
195struct sockaddr_in6 pfr_sin6;
196union sockaddr_union pfr_mask;
197struct pf_addr pfr_ffaddr;
198#endif
123static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
124static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
125#define V_pfr_kentry_z VNET(pfr_kentry_z)
126static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
127#define V_pfr_kcounters_z VNET(pfr_kcounters_z)
199
128
200void pfr_copyout_addr(struct pfr_addr *,
129static struct pf_addr pfr_ffaddr = {
130 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
131};
132
133static void pfr_copyout_addr(struct pfr_addr *,
201 struct pfr_kentry *ke);
134 struct pfr_kentry *ke);
202int pfr_validate_addr(struct pfr_addr *);
203void pfr_enqueue_addrs(struct pfr_ktable *,
135static int pfr_validate_addr(struct pfr_addr *);
136static void pfr_enqueue_addrs(struct pfr_ktable *,
204 struct pfr_kentryworkq *, int *, int);
137 struct pfr_kentryworkq *, int *, int);
205void pfr_mark_addrs(struct pfr_ktable *);
206struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
138static void pfr_mark_addrs(struct pfr_ktable *);
139static struct pfr_kentry
140 *pfr_lookup_addr(struct pfr_ktable *,
207 struct pfr_addr *, int);
141 struct pfr_addr *, int);
208struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
209void pfr_destroy_kentries(struct pfr_kentryworkq *);
210void pfr_destroy_kentry(struct pfr_kentry *);
211void pfr_insert_kentries(struct pfr_ktable *,
142static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
143static void pfr_destroy_kentries(struct pfr_kentryworkq *);
144static void pfr_destroy_kentry(struct pfr_kentry *);
145static void pfr_insert_kentries(struct pfr_ktable *,
212 struct pfr_kentryworkq *, long);
146 struct pfr_kentryworkq *, long);
213void pfr_remove_kentries(struct pfr_ktable *,
147static void pfr_remove_kentries(struct pfr_ktable *,
214 struct pfr_kentryworkq *);
148 struct pfr_kentryworkq *);
215void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
149static void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
216 int);
150 int);
217void pfr_reset_feedback(struct pfr_addr *, int, int);
218void pfr_prepare_network(union sockaddr_union *, int, int);
219int pfr_route_kentry(struct pfr_ktable *,
151static void pfr_reset_feedback(struct pfr_addr *, int);
152static void pfr_prepare_network(union sockaddr_union *, int, int);
153static int pfr_route_kentry(struct pfr_ktable *,
220 struct pfr_kentry *);
154 struct pfr_kentry *);
221int pfr_unroute_kentry(struct pfr_ktable *,
155static int pfr_unroute_kentry(struct pfr_ktable *,
222 struct pfr_kentry *);
156 struct pfr_kentry *);
223int pfr_walktree(struct radix_node *, void *);
224int pfr_validate_table(struct pfr_table *, int, int);
225int pfr_fix_anchor(char *);
226void pfr_commit_ktable(struct pfr_ktable *, long);
227void pfr_insert_ktables(struct pfr_ktableworkq *);
228void pfr_insert_ktable(struct pfr_ktable *);
229void pfr_setflags_ktables(struct pfr_ktableworkq *);
230void pfr_setflags_ktable(struct pfr_ktable *, int);
231void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
157static int pfr_walktree(struct radix_node *, void *);
158static int pfr_validate_table(struct pfr_table *, int, int);
159static int pfr_fix_anchor(char *);
160static void pfr_commit_ktable(struct pfr_ktable *, long);
161static void pfr_insert_ktables(struct pfr_ktableworkq *);
162static void pfr_insert_ktable(struct pfr_ktable *);
163static void pfr_setflags_ktables(struct pfr_ktableworkq *);
164static void pfr_setflags_ktable(struct pfr_ktable *, int);
165static void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
232 int);
166 int);
233void pfr_clstats_ktable(struct pfr_ktable *, long, int);
234struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int, int);
235void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
236void pfr_destroy_ktable(struct pfr_ktable *, int);
237int pfr_ktable_compare(struct pfr_ktable *,
167static void pfr_clstats_ktable(struct pfr_ktable *, long, int);
168static struct pfr_ktable
169 *pfr_create_ktable(struct pfr_table *, long, int);
170static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
171static void pfr_destroy_ktable(struct pfr_ktable *, int);
172static int pfr_ktable_compare(struct pfr_ktable *,
238 struct pfr_ktable *);
173 struct pfr_ktable *);
239struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
240void pfr_clean_node_mask(struct pfr_ktable *,
174static struct pfr_ktable
175 *pfr_lookup_table(struct pfr_table *);
176static void pfr_clean_node_mask(struct pfr_ktable *,
241 struct pfr_kentryworkq *);
177 struct pfr_kentryworkq *);
242int pfr_table_count(struct pfr_table *, int);
243int pfr_skip_table(struct pfr_table *,
178static int pfr_table_count(struct pfr_table *, int);
179static int pfr_skip_table(struct pfr_table *,
244 struct pfr_ktable *, int);
180 struct pfr_ktable *, int);
245struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
181static struct pfr_kentry
182 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
246
183
247RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
248RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
184static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
185static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
249
250struct pfr_ktablehead pfr_ktables;
251struct pfr_table pfr_nulltable;
252int pfr_ktable_cnt;
253
254void
255pfr_initialize(void)
256{
186
187struct pfr_ktablehead pfr_ktables;
188struct pfr_table pfr_nulltable;
189int pfr_ktable_cnt;
190
191void
192pfr_initialize(void)
193{
257#ifndef __FreeBSD__
258 pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
259 "pfrktable", NULL);
260 pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
261 "pfrkentry", NULL);
262 pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters), 0, 0, 0,
263 "pfrkcounters", NULL);
264
194
265 pfr_sin.sin_len = sizeof(pfr_sin);
266 pfr_sin.sin_family = AF_INET;
267 pfr_sin6.sin6_len = sizeof(pfr_sin6);
268 pfr_sin6.sin6_family = AF_INET6;
195 V_pfr_kentry_z = uma_zcreate("pf table entries",
196 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
197 0);
198 V_pfr_kcounters_z = uma_zcreate("pf table counters",
199 sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
200 UMA_ALIGN_PTR, 0);
201 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
202 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
203}
269
204
270 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
271#else
272 V_pfr_sin.sin_len = sizeof(V_pfr_sin);
273 V_pfr_sin.sin_family = AF_INET;
274 V_pfr_sin6.sin6_len = sizeof(V_pfr_sin6);
275 V_pfr_sin6.sin6_family = AF_INET6;
205void
206pfr_cleanup(void)
207{
276
208
277 memset(&V_pfr_ffaddr, 0xff, sizeof(V_pfr_ffaddr));
278#endif
209 uma_zdestroy(V_pfr_kentry_z);
210 uma_zdestroy(V_pfr_kcounters_z);
279}
280
281int
282pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
283{
284 struct pfr_ktable *kt;
285 struct pfr_kentryworkq workq;
211}
212
213int
214pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
215{
216 struct pfr_ktable *kt;
217 struct pfr_kentryworkq workq;
286 int s;
287
218
288 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
219 PF_RULES_WASSERT();
220
221 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
289 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
290 return (EINVAL);
291 kt = pfr_lookup_table(tbl);
292 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
293 return (ESRCH);
294 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
295 return (EPERM);
296 pfr_enqueue_addrs(kt, &workq, ndel, 0);
297
298 if (!(flags & PFR_FLAG_DUMMY)) {
222 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
223 return (EINVAL);
224 kt = pfr_lookup_table(tbl);
225 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
226 return (ESRCH);
227 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
228 return (EPERM);
229 pfr_enqueue_addrs(kt, &workq, ndel, 0);
230
231 if (!(flags & PFR_FLAG_DUMMY)) {
299 if (flags & PFR_FLAG_ATOMIC)
300 s = splsoftnet();
301 pfr_remove_kentries(kt, &workq);
232 pfr_remove_kentries(kt, &workq);
302 if (flags & PFR_FLAG_ATOMIC)
303 splx(s);
304 if (kt->pfrkt_cnt) {
305 printf("pfr_clr_addrs: corruption detected (%d).\n",
306 kt->pfrkt_cnt);
307 kt->pfrkt_cnt = 0;
308 }
233 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
309 }
310 return (0);
311}
312
313int
314pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
315 int *nadd, int flags)
316{
317 struct pfr_ktable *kt, *tmpkt;
318 struct pfr_kentryworkq workq;
319 struct pfr_kentry *p, *q;
234 }
235 return (0);
236}
237
238int
239pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
240 int *nadd, int flags)
241{
242 struct pfr_ktable *kt, *tmpkt;
243 struct pfr_kentryworkq workq;
244 struct pfr_kentry *p, *q;
320 struct pfr_addr ad;
321 int i, rv, s, xadd = 0;
245 struct pfr_addr *ad;
246 int i, rv, xadd = 0;
322 long tzero = time_second;
323
247 long tzero = time_second;
248
324 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
325 PFR_FLAG_FEEDBACK);
249 PF_RULES_WASSERT();
250
251 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
326 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
327 return (EINVAL);
328 kt = pfr_lookup_table(tbl);
329 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
330 return (ESRCH);
331 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
332 return (EPERM);
252 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
253 return (EINVAL);
254 kt = pfr_lookup_table(tbl);
255 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
256 return (ESRCH);
257 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
258 return (EPERM);
333 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
334 !(flags & PFR_FLAG_USERIOCTL));
259 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
335 if (tmpkt == NULL)
336 return (ENOMEM);
337 SLIST_INIT(&workq);
260 if (tmpkt == NULL)
261 return (ENOMEM);
262 SLIST_INIT(&workq);
338 for (i = 0; i < size; i++) {
339 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
340 senderr(EFAULT);
341 if (pfr_validate_addr(&ad))
263 for (i = 0, ad = addr; i < size; i++, ad++) {
264 if (pfr_validate_addr(ad))
342 senderr(EINVAL);
265 senderr(EINVAL);
343 p = pfr_lookup_addr(kt, &ad, 1);
344 q = pfr_lookup_addr(tmpkt, &ad, 1);
266 p = pfr_lookup_addr(kt, ad, 1);
267 q = pfr_lookup_addr(tmpkt, ad, 1);
345 if (flags & PFR_FLAG_FEEDBACK) {
346 if (q != NULL)
268 if (flags & PFR_FLAG_FEEDBACK) {
269 if (q != NULL)
347 ad.pfra_fback = PFR_FB_DUPLICATE;
270 ad->pfra_fback = PFR_FB_DUPLICATE;
348 else if (p == NULL)
271 else if (p == NULL)
349 ad.pfra_fback = PFR_FB_ADDED;
350 else if (p->pfrke_not != ad.pfra_not)
351 ad.pfra_fback = PFR_FB_CONFLICT;
272 ad->pfra_fback = PFR_FB_ADDED;
273 else if (p->pfrke_not != ad->pfra_not)
274 ad->pfra_fback = PFR_FB_CONFLICT;
352 else
275 else
353 ad.pfra_fback = PFR_FB_NONE;
276 ad->pfra_fback = PFR_FB_NONE;
354 }
355 if (p == NULL && q == NULL) {
277 }
278 if (p == NULL && q == NULL) {
356 p = pfr_create_kentry(&ad,
357 !(flags & PFR_FLAG_USERIOCTL));
279 p = pfr_create_kentry(ad);
358 if (p == NULL)
359 senderr(ENOMEM);
360 if (pfr_route_kentry(tmpkt, p)) {
361 pfr_destroy_kentry(p);
280 if (p == NULL)
281 senderr(ENOMEM);
282 if (pfr_route_kentry(tmpkt, p)) {
283 pfr_destroy_kentry(p);
362 ad.pfra_fback = PFR_FB_NONE;
284 ad->pfra_fback = PFR_FB_NONE;
363 } else {
364 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
365 xadd++;
366 }
367 }
285 } else {
286 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
287 xadd++;
288 }
289 }
368 if (flags & PFR_FLAG_FEEDBACK)
369 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
370 senderr(EFAULT);
371 }
372 pfr_clean_node_mask(tmpkt, &workq);
290 }
291 pfr_clean_node_mask(tmpkt, &workq);
373 if (!(flags & PFR_FLAG_DUMMY)) {
374 if (flags & PFR_FLAG_ATOMIC)
375 s = splsoftnet();
292 if (!(flags & PFR_FLAG_DUMMY))
376 pfr_insert_kentries(kt, &workq, tzero);
293 pfr_insert_kentries(kt, &workq, tzero);
377 if (flags & PFR_FLAG_ATOMIC)
378 splx(s);
379 } else
294 else
380 pfr_destroy_kentries(&workq);
381 if (nadd != NULL)
382 *nadd = xadd;
383 pfr_destroy_ktable(tmpkt, 0);
384 return (0);
385_bad:
386 pfr_clean_node_mask(tmpkt, &workq);
387 pfr_destroy_kentries(&workq);
388 if (flags & PFR_FLAG_FEEDBACK)
295 pfr_destroy_kentries(&workq);
296 if (nadd != NULL)
297 *nadd = xadd;
298 pfr_destroy_ktable(tmpkt, 0);
299 return (0);
300_bad:
301 pfr_clean_node_mask(tmpkt, &workq);
302 pfr_destroy_kentries(&workq);
303 if (flags & PFR_FLAG_FEEDBACK)
389 pfr_reset_feedback(addr, size, flags);
304 pfr_reset_feedback(addr, size);
390 pfr_destroy_ktable(tmpkt, 0);
391 return (rv);
392}
393
394int
395pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
396 int *ndel, int flags)
397{
398 struct pfr_ktable *kt;
399 struct pfr_kentryworkq workq;
400 struct pfr_kentry *p;
305 pfr_destroy_ktable(tmpkt, 0);
306 return (rv);
307}
308
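/*
 * [Editorial note, not part of either revision] pfr_add_addrs() above
 * routes every accepted address into the scratch table 'tmpkt' before
 * anything is committed, so an address listed twice in one request is
 * caught on its second occurrence (q != NULL -> PFR_FB_DUPLICATE).  An
 * address already present in the target table reports PFR_FB_NONE when
 * its negation flag matches and PFR_FB_CONFLICT when it differs; only
 * addresses new to both tables are queued on 'workq', inserted (unless
 * PFR_FLAG_DUMMY is set) and counted in *nadd.
 */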
309int
310pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
311 int *ndel, int flags)
312{
313 struct pfr_ktable *kt;
314 struct pfr_kentryworkq workq;
315 struct pfr_kentry *p;
401 struct pfr_addr ad;
402 int i, rv, s, xdel = 0, log = 1;
316 struct pfr_addr *ad;
317 int i, rv, xdel = 0, log = 1;
403
318
404 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
405 PFR_FLAG_FEEDBACK);
319 PF_RULES_WASSERT();
320
321 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
406 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
407 return (EINVAL);
408 kt = pfr_lookup_table(tbl);
409 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
410 return (ESRCH);
411 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
412 return (EPERM);
413 /*
414 * there are two algorithms to choose from here.
415 * with:
416 * n: number of addresses to delete
417 * N: number of addresses in the table
418 *
419 * one is O(N) and is better for large 'n'
420 * one is O(n*LOG(N)) and is better for small 'n'
322 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
323 return (EINVAL);
324 kt = pfr_lookup_table(tbl);
325 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
326 return (ESRCH);
327 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
328 return (EPERM);
329 /*
330 * there are two algorithms to choose from here.
331 * with:
332 * n: number of addresses to delete
333 * N: number of addresses in the table
334 *
335 * one is O(N) and is better for large 'n'
336 * one is O(n*LOG(N)) and is better for small 'n'
421 *
337 *
422 * following code try to decide which one is best.
423 */
424 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
425 log++;
426 if (size > kt->pfrkt_cnt/log) {
427 /* full table scan */
428 pfr_mark_addrs(kt);
429 } else {
430 /* iterate over addresses to delete */
338 * following code try to decide which one is best.
339 */
340 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
341 log++;
342 if (size > kt->pfrkt_cnt/log) {
343 /* full table scan */
344 pfr_mark_addrs(kt);
345 } else {
346 /* iterate over addresses to delete */
431 for (i = 0; i < size; i++) {
432 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
433 return (EFAULT);
434 if (pfr_validate_addr(&ad))
347 for (i = 0, ad = addr; i < size; i++, ad++) {
348 if (pfr_validate_addr(ad))
435 return (EINVAL);
349 return (EINVAL);
436 p = pfr_lookup_addr(kt, &ad, 1);
350 p = pfr_lookup_addr(kt, ad, 1);
437 if (p != NULL)
438 p->pfrke_mark = 0;
439 }
440 }
441 SLIST_INIT(&workq);
351 if (p != NULL)
352 p->pfrke_mark = 0;
353 }
354 }
355 SLIST_INIT(&workq);
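/*
 * [Editorial note, worked example, not part of either revision] For a
 * table of 1000 entries the loop above runs 10 times, leaving log == 11
 * (it starts at 1), so a request deleting more than 1000/11 == 90
 * addresses takes the O(N) mark-and-sweep path, while smaller requests
 * do O(n * log N) individual radix lookups instead.
 */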
442 for (i = 0; i < size; i++) {
443 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
444 senderr(EFAULT);
445 if (pfr_validate_addr(&ad))
356 for (i = 0, ad = addr; i < size; i++, ad++) {
357 if (pfr_validate_addr(ad))
446 senderr(EINVAL);
358 senderr(EINVAL);
447 p = pfr_lookup_addr(kt, &ad, 1);
359 p = pfr_lookup_addr(kt, ad, 1);
448 if (flags & PFR_FLAG_FEEDBACK) {
449 if (p == NULL)
360 if (flags & PFR_FLAG_FEEDBACK) {
361 if (p == NULL)
450 ad.pfra_fback = PFR_FB_NONE;
451 else if (p->pfrke_not != ad.pfra_not)
452 ad.pfra_fback = PFR_FB_CONFLICT;
362 ad->pfra_fback = PFR_FB_NONE;
363 else if (p->pfrke_not != ad->pfra_not)
364 ad->pfra_fback = PFR_FB_CONFLICT;
453 else if (p->pfrke_mark)
365 else if (p->pfrke_mark)
454 ad.pfra_fback = PFR_FB_DUPLICATE;
366 ad->pfra_fback = PFR_FB_DUPLICATE;
455 else
367 else
456 ad.pfra_fback = PFR_FB_DELETED;
368 ad->pfra_fback = PFR_FB_DELETED;
457 }
369 }
458 if (p != NULL && p->pfrke_not == ad.pfra_not &&
370 if (p != NULL && p->pfrke_not == ad->pfra_not &&
459 !p->pfrke_mark) {
460 p->pfrke_mark = 1;
461 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
462 xdel++;
463 }
371 !p->pfrke_mark) {
372 p->pfrke_mark = 1;
373 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
374 xdel++;
375 }
464 if (flags & PFR_FLAG_FEEDBACK)
465 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
466 senderr(EFAULT);
467 }
376 }
468 if (!(flags & PFR_FLAG_DUMMY)) {
469 if (flags & PFR_FLAG_ATOMIC)
470 s = splsoftnet();
377 if (!(flags & PFR_FLAG_DUMMY))
471 pfr_remove_kentries(kt, &workq);
378 pfr_remove_kentries(kt, &workq);
472 if (flags & PFR_FLAG_ATOMIC)
473 splx(s);
474 }
475 if (ndel != NULL)
476 *ndel = xdel;
477 return (0);
478_bad:
479 if (flags & PFR_FLAG_FEEDBACK)
379 if (ndel != NULL)
380 *ndel = xdel;
381 return (0);
382_bad:
383 if (flags & PFR_FLAG_FEEDBACK)
480 pfr_reset_feedback(addr, size, flags);
384 pfr_reset_feedback(addr, size);
481 return (rv);
482}
483
484int
485pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
486 int *size2, int *nadd, int *ndel, int *nchange, int flags,
487 u_int32_t ignore_pfrt_flags)
488{
489 struct pfr_ktable *kt, *tmpkt;
490 struct pfr_kentryworkq addq, delq, changeq;
491 struct pfr_kentry *p, *q;
492 struct pfr_addr ad;
385 return (rv);
386}
387
388int
389pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
390 int *size2, int *nadd, int *ndel, int *nchange, int flags,
391 u_int32_t ignore_pfrt_flags)
392{
393 struct pfr_ktable *kt, *tmpkt;
394 struct pfr_kentryworkq addq, delq, changeq;
395 struct pfr_kentry *p, *q;
396 struct pfr_addr ad;
493 int i, rv, s, xadd = 0, xdel = 0, xchange = 0;
397 int i, rv, xadd = 0, xdel = 0, xchange = 0;
494 long tzero = time_second;
495
398 long tzero = time_second;
399
496 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
497 PFR_FLAG_FEEDBACK);
400 PF_RULES_WASSERT();
401
402 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
498 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
499 PFR_FLAG_USERIOCTL))
500 return (EINVAL);
501 kt = pfr_lookup_table(tbl);
502 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
503 return (ESRCH);
504 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
505 return (EPERM);
403 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
404 PFR_FLAG_USERIOCTL))
405 return (EINVAL);
406 kt = pfr_lookup_table(tbl);
407 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
408 return (ESRCH);
409 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
410 return (EPERM);
506 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
507 !(flags & PFR_FLAG_USERIOCTL));
411 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
508 if (tmpkt == NULL)
509 return (ENOMEM);
510 pfr_mark_addrs(kt);
511 SLIST_INIT(&addq);
512 SLIST_INIT(&delq);
513 SLIST_INIT(&changeq);
514 for (i = 0; i < size; i++) {
412 if (tmpkt == NULL)
413 return (ENOMEM);
414 pfr_mark_addrs(kt);
415 SLIST_INIT(&addq);
416 SLIST_INIT(&delq);
417 SLIST_INIT(&changeq);
418 for (i = 0; i < size; i++) {
515 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
516 senderr(EFAULT);
419 /*
420 * XXXGL: undertand pf_if usage of this function
421 * and make ad a moving pointer
422 */
423 bcopy(addr + i, &ad, sizeof(ad));
517 if (pfr_validate_addr(&ad))
518 senderr(EINVAL);
519 ad.pfra_fback = PFR_FB_NONE;
520 p = pfr_lookup_addr(kt, &ad, 1);
521 if (p != NULL) {
522 if (p->pfrke_mark) {
523 ad.pfra_fback = PFR_FB_DUPLICATE;
524 goto _skip;

--- 5 unchanged lines hidden ---

530 xchange++;
531 }
532 } else {
533 q = pfr_lookup_addr(tmpkt, &ad, 1);
534 if (q != NULL) {
535 ad.pfra_fback = PFR_FB_DUPLICATE;
536 goto _skip;
537 }
424 if (pfr_validate_addr(&ad))
425 senderr(EINVAL);
426 ad.pfra_fback = PFR_FB_NONE;
427 p = pfr_lookup_addr(kt, &ad, 1);
428 if (p != NULL) {
429 if (p->pfrke_mark) {
430 ad.pfra_fback = PFR_FB_DUPLICATE;
431 goto _skip;

--- 5 unchanged lines hidden (view full) ---

437 xchange++;
438 }
439 } else {
440 q = pfr_lookup_addr(tmpkt, &ad, 1);
441 if (q != NULL) {
442 ad.pfra_fback = PFR_FB_DUPLICATE;
443 goto _skip;
444 }
538 p = pfr_create_kentry(&ad,
539 !(flags & PFR_FLAG_USERIOCTL));
445 p = pfr_create_kentry(&ad);
540 if (p == NULL)
541 senderr(ENOMEM);
542 if (pfr_route_kentry(tmpkt, p)) {
543 pfr_destroy_kentry(p);
544 ad.pfra_fback = PFR_FB_NONE;
545 } else {
546 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
547 ad.pfra_fback = PFR_FB_ADDED;
548 xadd++;
549 }
550 }
551_skip:
552 if (flags & PFR_FLAG_FEEDBACK)
446 if (p == NULL)
447 senderr(ENOMEM);
448 if (pfr_route_kentry(tmpkt, p)) {
449 pfr_destroy_kentry(p);
450 ad.pfra_fback = PFR_FB_NONE;
451 } else {
452 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
453 ad.pfra_fback = PFR_FB_ADDED;
454 xadd++;
455 }
456 }
457_skip:
458 if (flags & PFR_FLAG_FEEDBACK)
553 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
554 senderr(EFAULT);
459 bcopy(&ad, addr + i, sizeof(ad));
555 }
556 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
557 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
558 if (*size2 < size+xdel) {
559 *size2 = size+xdel;
560 senderr(0);
561 }
562 i = 0;
563 SLIST_FOREACH(p, &delq, pfrke_workq) {
564 pfr_copyout_addr(&ad, p);
565 ad.pfra_fback = PFR_FB_DELETED;
460 }
461 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
462 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
463 if (*size2 < size+xdel) {
464 *size2 = size+xdel;
465 senderr(0);
466 }
467 i = 0;
468 SLIST_FOREACH(p, &delq, pfrke_workq) {
469 pfr_copyout_addr(&ad, p);
470 ad.pfra_fback = PFR_FB_DELETED;
566 if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
567 senderr(EFAULT);
471 bcopy(&ad, addr + size + i, sizeof(ad));
568 i++;
569 }
570 }
571 pfr_clean_node_mask(tmpkt, &addq);
572 if (!(flags & PFR_FLAG_DUMMY)) {
472 i++;
473 }
474 }
475 pfr_clean_node_mask(tmpkt, &addq);
476 if (!(flags & PFR_FLAG_DUMMY)) {
573 if (flags & PFR_FLAG_ATOMIC)
574 s = splsoftnet();
575 pfr_insert_kentries(kt, &addq, tzero);
576 pfr_remove_kentries(kt, &delq);
577 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
477 pfr_insert_kentries(kt, &addq, tzero);
478 pfr_remove_kentries(kt, &delq);
479 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
578 if (flags & PFR_FLAG_ATOMIC)
579 splx(s);
580 } else
581 pfr_destroy_kentries(&addq);
582 if (nadd != NULL)
583 *nadd = xadd;
584 if (ndel != NULL)
585 *ndel = xdel;
586 if (nchange != NULL)
587 *nchange = xchange;
588 if ((flags & PFR_FLAG_FEEDBACK) && size2)
589 *size2 = size+xdel;
590 pfr_destroy_ktable(tmpkt, 0);
591 return (0);
592_bad:
593 pfr_clean_node_mask(tmpkt, &addq);
594 pfr_destroy_kentries(&addq);
595 if (flags & PFR_FLAG_FEEDBACK)
480 } else
481 pfr_destroy_kentries(&addq);
482 if (nadd != NULL)
483 *nadd = xadd;
484 if (ndel != NULL)
485 *ndel = xdel;
486 if (nchange != NULL)
487 *nchange = xchange;
488 if ((flags & PFR_FLAG_FEEDBACK) && size2)
489 *size2 = size+xdel;
490 pfr_destroy_ktable(tmpkt, 0);
491 return (0);
492_bad:
493 pfr_clean_node_mask(tmpkt, &addq);
494 pfr_destroy_kentries(&addq);
495 if (flags & PFR_FLAG_FEEDBACK)
596 pfr_reset_feedback(addr, size, flags);
496 pfr_reset_feedback(addr, size);
597 pfr_destroy_ktable(tmpkt, 0);
598 return (rv);
599}
600
601int
602pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
603 int *nmatch, int flags)
604{
605 struct pfr_ktable *kt;
606 struct pfr_kentry *p;
497 pfr_destroy_ktable(tmpkt, 0);
498 return (rv);
499}
500
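/*
 * [Editorial note, not part of either revision] With PFR_FLAG_FEEDBACK
 * and a non-zero *size2, pfr_set_addrs() above expects the caller's
 * buffer to hold size + xdel entries: slots 0..size-1 return per-address
 * feedback and the entries swept out of the table (PFR_FB_DELETED) are
 * appended after them.  If the buffer is too small, *size2 is set to the
 * required length and the function bails out early (via senderr(0))
 * without committing anything, so the caller can retry with a larger
 * buffer.
 */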
501int
502pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
503 int *nmatch, int flags)
504{
505 struct pfr_ktable *kt;
506 struct pfr_kentry *p;
607 struct pfr_addr ad;
507 struct pfr_addr *ad;
608 int i, xmatch = 0;
609
508 int i, xmatch = 0;
509
510 PF_RULES_RASSERT();
511
610 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
611 if (pfr_validate_table(tbl, 0, 0))
612 return (EINVAL);
613 kt = pfr_lookup_table(tbl);
614 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
615 return (ESRCH);
616
512 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
513 if (pfr_validate_table(tbl, 0, 0))
514 return (EINVAL);
515 kt = pfr_lookup_table(tbl);
516 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
517 return (ESRCH);
518
617 for (i = 0; i < size; i++) {
618 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
619 return (EFAULT);
620 if (pfr_validate_addr(&ad))
519 for (i = 0, ad = addr; i < size; i++, ad++) {
520 if (pfr_validate_addr(ad))
621 return (EINVAL);
521 return (EINVAL);
622 if (ADDR_NETWORK(&ad))
522 if (ADDR_NETWORK(ad))
623 return (EINVAL);
523 return (EINVAL);
624 p = pfr_lookup_addr(kt, &ad, 0);
524 p = pfr_lookup_addr(kt, ad, 0);
625 if (flags & PFR_FLAG_REPLACE)
525 if (flags & PFR_FLAG_REPLACE)
626 pfr_copyout_addr(&ad, p);
627 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
526 pfr_copyout_addr(ad, p);
527 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
628 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
629 if (p != NULL && !p->pfrke_not)
630 xmatch++;
528 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
529 if (p != NULL && !p->pfrke_not)
530 xmatch++;
631 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
632 return (EFAULT);
633 }
634 if (nmatch != NULL)
635 *nmatch = xmatch;
636 return (0);
637}
638
639int
640pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
641 int flags)
642{
643 struct pfr_ktable *kt;
644 struct pfr_walktree w;
645 int rv;
646
531 }
532 if (nmatch != NULL)
533 *nmatch = xmatch;
534 return (0);
535}
536
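/*
 * [Editorial note, not part of either revision] In pfr_tst_addrs()
 * above, a negated table entry still satisfies the lookup but is
 * reported as PFR_FB_NOTMATCH and does not count toward *nmatch; only
 * entries without the negation flag bump xmatch.  With PFR_FLAG_REPLACE
 * the matched entry's address, prefix length and negation are copied
 * back over the caller-supplied address.
 */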
537int
538pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
539 int flags)
540{
541 struct pfr_ktable *kt;
542 struct pfr_walktree w;
543 int rv;
544
545 PF_RULES_RASSERT();
546
647 ACCEPT_FLAGS(flags, 0);
648 if (pfr_validate_table(tbl, 0, 0))
649 return (EINVAL);
650 kt = pfr_lookup_table(tbl);
651 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
652 return (ESRCH);
653 if (kt->pfrkt_cnt > *size) {
654 *size = kt->pfrkt_cnt;
655 return (0);
656 }
657
658 bzero(&w, sizeof(w));
659 w.pfrw_op = PFRW_GET_ADDRS;
660 w.pfrw_addr = addr;
661 w.pfrw_free = kt->pfrkt_cnt;
547 ACCEPT_FLAGS(flags, 0);
548 if (pfr_validate_table(tbl, 0, 0))
549 return (EINVAL);
550 kt = pfr_lookup_table(tbl);
551 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
552 return (ESRCH);
553 if (kt->pfrkt_cnt > *size) {
554 *size = kt->pfrkt_cnt;
555 return (0);
556 }
557
558 bzero(&w, sizeof(w));
559 w.pfrw_op = PFRW_GET_ADDRS;
560 w.pfrw_addr = addr;
561 w.pfrw_free = kt->pfrkt_cnt;
662 w.pfrw_flags = flags;
663#ifdef __FreeBSD__
664 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
562 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
665#else
666 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
667#endif
668 if (!rv)
563 if (!rv)
669#ifdef __FreeBSD__
670 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
671 &w);
564 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
565 &w);
672#else
673 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
674#endif
675 if (rv)
676 return (rv);
677
566 if (rv)
567 return (rv);
568
678 if (w.pfrw_free) {
679 printf("pfr_get_addrs: corruption detected (%d).\n",
680 w.pfrw_free);
681 return (ENOTTY);
682 }
569 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
570 w.pfrw_free));
571
683 *size = kt->pfrkt_cnt;
684 return (0);
685}
686
687int
688pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
689 int flags)
690{
691 struct pfr_ktable *kt;
692 struct pfr_walktree w;
693 struct pfr_kentryworkq workq;
572 *size = kt->pfrkt_cnt;
573 return (0);
574}
575
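/*
 * [Editorial note, not part of either revision] pfr_get_addrs() above
 * treats *size as an in/out buffer length: if the table holds more
 * entries than the caller allowed for, it copies nothing and returns 0
 * with *size set to the table size, so callers are expected to compare
 * the returned count with what they asked for and retry with a bigger
 * buffer.
 */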
576int
577pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
578 int flags)
579{
580 struct pfr_ktable *kt;
581 struct pfr_walktree w;
582 struct pfr_kentryworkq workq;
694 int rv, s;
583 int rv;
695 long tzero = time_second;
696
584 long tzero = time_second;
585
586 PF_RULES_RASSERT();
587
697 /* XXX PFR_FLAG_CLSTATS disabled */
588 /* XXX PFR_FLAG_CLSTATS disabled */
698 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
589 ACCEPT_FLAGS(flags, 0);
699 if (pfr_validate_table(tbl, 0, 0))
700 return (EINVAL);
701 kt = pfr_lookup_table(tbl);
702 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
703 return (ESRCH);
704 if (kt->pfrkt_cnt > *size) {
705 *size = kt->pfrkt_cnt;
706 return (0);
707 }
708
709 bzero(&w, sizeof(w));
710 w.pfrw_op = PFRW_GET_ASTATS;
711 w.pfrw_astats = addr;
712 w.pfrw_free = kt->pfrkt_cnt;
590 if (pfr_validate_table(tbl, 0, 0))
591 return (EINVAL);
592 kt = pfr_lookup_table(tbl);
593 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
594 return (ESRCH);
595 if (kt->pfrkt_cnt > *size) {
596 *size = kt->pfrkt_cnt;
597 return (0);
598 }
599
600 bzero(&w, sizeof(w));
601 w.pfrw_op = PFRW_GET_ASTATS;
602 w.pfrw_astats = addr;
603 w.pfrw_free = kt->pfrkt_cnt;
713 w.pfrw_flags = flags;
714 if (flags & PFR_FLAG_ATOMIC)
715 s = splsoftnet();
716#ifdef __FreeBSD__
717 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
604 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
718#else
719 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
720#endif
721 if (!rv)
605 if (!rv)
722#ifdef __FreeBSD__
723 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
606 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
724 &w);
607 &w);
725#else
726 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
727#endif
728 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
729 pfr_enqueue_addrs(kt, &workq, NULL, 0);
730 pfr_clstats_kentries(&workq, tzero, 0);
731 }
608 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
609 pfr_enqueue_addrs(kt, &workq, NULL, 0);
610 pfr_clstats_kentries(&workq, tzero, 0);
611 }
732 if (flags & PFR_FLAG_ATOMIC)
733 splx(s);
734 if (rv)
735 return (rv);
736
737 if (w.pfrw_free) {
738 printf("pfr_get_astats: corruption detected (%d).\n",
739 w.pfrw_free);
740 return (ENOTTY);
741 }
742 *size = kt->pfrkt_cnt;
743 return (0);
744}
745
746int
747pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
748 int *nzero, int flags)
749{
750 struct pfr_ktable *kt;
751 struct pfr_kentryworkq workq;
752 struct pfr_kentry *p;
612 if (rv)
613 return (rv);
614
615 if (w.pfrw_free) {
616 printf("pfr_get_astats: corruption detected (%d).\n",
617 w.pfrw_free);
618 return (ENOTTY);
619 }
620 *size = kt->pfrkt_cnt;
621 return (0);
622}
623
624int
625pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
626 int *nzero, int flags)
627{
628 struct pfr_ktable *kt;
629 struct pfr_kentryworkq workq;
630 struct pfr_kentry *p;
753 struct pfr_addr ad;
754 int i, rv, s, xzero = 0;
631 struct pfr_addr *ad;
632 int i, rv, xzero = 0;
755
633
756 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
757 PFR_FLAG_FEEDBACK);
634 PF_RULES_WASSERT();
635
636 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
758 if (pfr_validate_table(tbl, 0, 0))
759 return (EINVAL);
760 kt = pfr_lookup_table(tbl);
761 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
762 return (ESRCH);
763 SLIST_INIT(&workq);
637 if (pfr_validate_table(tbl, 0, 0))
638 return (EINVAL);
639 kt = pfr_lookup_table(tbl);
640 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
641 return (ESRCH);
642 SLIST_INIT(&workq);
764 for (i = 0; i < size; i++) {
765 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
766 senderr(EFAULT);
767 if (pfr_validate_addr(&ad))
643 for (i = 0, ad = addr; i < size; i++, ad++) {
644 if (pfr_validate_addr(ad))
768 senderr(EINVAL);
645 senderr(EINVAL);
769 p = pfr_lookup_addr(kt, &ad, 1);
646 p = pfr_lookup_addr(kt, ad, 1);
770 if (flags & PFR_FLAG_FEEDBACK) {
647 if (flags & PFR_FLAG_FEEDBACK) {
771 ad.pfra_fback = (p != NULL) ?
648 ad->pfra_fback = (p != NULL) ?
772 PFR_FB_CLEARED : PFR_FB_NONE;
649 PFR_FB_CLEARED : PFR_FB_NONE;
773 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
774 senderr(EFAULT);
775 }
776 if (p != NULL) {
777 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
778 xzero++;
779 }
780 }
781
650 }
651 if (p != NULL) {
652 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
653 xzero++;
654 }
655 }
656
782 if (!(flags & PFR_FLAG_DUMMY)) {
783 if (flags & PFR_FLAG_ATOMIC)
784 s = splsoftnet();
657 if (!(flags & PFR_FLAG_DUMMY))
785 pfr_clstats_kentries(&workq, 0, 0);
658 pfr_clstats_kentries(&workq, 0, 0);
786 if (flags & PFR_FLAG_ATOMIC)
787 splx(s);
788 }
789 if (nzero != NULL)
790 *nzero = xzero;
791 return (0);
792_bad:
793 if (flags & PFR_FLAG_FEEDBACK)
659 if (nzero != NULL)
660 *nzero = xzero;
661 return (0);
662_bad:
663 if (flags & PFR_FLAG_FEEDBACK)
794 pfr_reset_feedback(addr, size, flags);
664 pfr_reset_feedback(addr, size);
795 return (rv);
796}
797
665 return (rv);
666}
667
798int
668static int
799pfr_validate_addr(struct pfr_addr *ad)
800{
801 int i;
802
803 switch (ad->pfra_af) {
804#ifdef INET
805 case AF_INET:
806 if (ad->pfra_net > 32)

--- 17 unchanged lines hidden ---

824 return (-1);
825 if (ad->pfra_not && ad->pfra_not != 1)
826 return (-1);
827 if (ad->pfra_fback)
828 return (-1);
829 return (0);
830}
831
669pfr_validate_addr(struct pfr_addr *ad)
670{
671 int i;
672
673 switch (ad->pfra_af) {
674#ifdef INET
675 case AF_INET:
676 if (ad->pfra_net > 32)

--- 17 unchanged lines hidden ---

694 return (-1);
695 if (ad->pfra_not && ad->pfra_not != 1)
696 return (-1);
697 if (ad->pfra_fback)
698 return (-1);
699 return (0);
700}
701
832void
702static void
833pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
834 int *naddr, int sweep)
835{
836 struct pfr_walktree w;
837
838 SLIST_INIT(workq);
839 bzero(&w, sizeof(w));
840 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
841 w.pfrw_workq = workq;
842 if (kt->pfrkt_ip4 != NULL)
703pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
704 int *naddr, int sweep)
705{
706 struct pfr_walktree w;
707
708 SLIST_INIT(workq);
709 bzero(&w, sizeof(w));
710 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
711 w.pfrw_workq = workq;
712 if (kt->pfrkt_ip4 != NULL)
843#ifdef __FreeBSD__
844 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
713 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
845 &w))
714 &w))
846#else
847 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
848#endif
849 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
850 if (kt->pfrkt_ip6 != NULL)
715 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
716 if (kt->pfrkt_ip6 != NULL)
851#ifdef __FreeBSD__
852 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
717 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
853 &w))
718 &w))
854#else
855 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
856#endif
857 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
858 if (naddr != NULL)
859 *naddr = w.pfrw_cnt;
860}
861
719 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
720 if (naddr != NULL)
721 *naddr = w.pfrw_cnt;
722}
723
862void
724static void
863pfr_mark_addrs(struct pfr_ktable *kt)
864{
865 struct pfr_walktree w;
866
867 bzero(&w, sizeof(w));
868 w.pfrw_op = PFRW_MARK;
725pfr_mark_addrs(struct pfr_ktable *kt)
726{
727 struct pfr_walktree w;
728
729 bzero(&w, sizeof(w));
730 w.pfrw_op = PFRW_MARK;
869#ifdef __FreeBSD__
870 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
731 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
871#else
872 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
873#endif
874 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
732 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
875#ifdef __FreeBSD__
876 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
733 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
877#else
878 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
879#endif
880 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
881}
882
883
734 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
735}
736
737
884struct pfr_kentry *
738static struct pfr_kentry *
885pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
886{
887 union sockaddr_union sa, mask;
739pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
740{
741 union sockaddr_union sa, mask;
888#ifdef __FreeBSD__
889 struct radix_node_head *head = NULL;
742 struct radix_node_head *head = NULL;
890#else
891 struct radix_node_head *head;
892#endif
893 struct pfr_kentry *ke;
743 struct pfr_kentry *ke;
894 int s;
895
896 bzero(&sa, sizeof(sa));
897 if (ad->pfra_af == AF_INET) {
898 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
899 head = kt->pfrkt_ip4;
900 } else if ( ad->pfra_af == AF_INET6 ) {
901 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
902 head = kt->pfrkt_ip6;
903 }
904 if (ADDR_NETWORK(ad)) {
905 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
744
745 bzero(&sa, sizeof(sa));
746 if (ad->pfra_af == AF_INET) {
747 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
748 head = kt->pfrkt_ip4;
749 } else if ( ad->pfra_af == AF_INET6 ) {
750 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
751 head = kt->pfrkt_ip6;
752 }
753 if (ADDR_NETWORK(ad)) {
754 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
906 s = splsoftnet(); /* rn_lookup makes use of globals */
907#ifdef __FreeBSD__
908 PF_LOCK_ASSERT();
909#endif
910 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
755 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
911 splx(s);
912 if (ke && KENTRY_RNF_ROOT(ke))
913 ke = NULL;
914 } else {
915 ke = (struct pfr_kentry *)rn_match(&sa, head);
916 if (ke && KENTRY_RNF_ROOT(ke))
917 ke = NULL;
918 if (exact && ke && KENTRY_NETWORK(ke))
919 ke = NULL;
920 }
921 return (ke);
922}
923
756 if (ke && KENTRY_RNF_ROOT(ke))
757 ke = NULL;
758 } else {
759 ke = (struct pfr_kentry *)rn_match(&sa, head);
760 if (ke && KENTRY_RNF_ROOT(ke))
761 ke = NULL;
762 if (exact && ke && KENTRY_NETWORK(ke))
763 ke = NULL;
764 }
765 return (ke);
766}
767
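/*
 * [Editorial note, worked example, not part of either revision] The
 * 'exact' argument decides whether a covering network entry satisfies a
 * host lookup: with 10.0.0.0/8 in the table, looking up the host
 * 10.1.2.3 with exact == 0 returns the /8 entry (best match via
 * rn_match()), while exact == 1 returns NULL because the entry found is
 * a network, not the host itself.  Network-sized queries go through
 * rn_lookup() with an explicit mask and therefore only match an entry
 * with the identical prefix.
 */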
924struct pfr_kentry *
925pfr_create_kentry(struct pfr_addr *ad, int intr)
768static struct pfr_kentry *
769pfr_create_kentry(struct pfr_addr *ad)
926{
927 struct pfr_kentry *ke;
928
770{
771 struct pfr_kentry *ke;
772
929#ifdef __FreeBSD__
930 ke = pool_get(&V_pfr_kentry_pl, PR_NOWAIT | PR_ZERO);
931#else
932 if (intr)
933 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT | PR_ZERO);
934 else
935 ke = pool_get(&pfr_kentry_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
936#endif
773 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
937 if (ke == NULL)
938 return (NULL);
939
940 if (ad->pfra_af == AF_INET)
941 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
942 else if (ad->pfra_af == AF_INET6)
943 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
944 ke->pfrke_af = ad->pfra_af;
945 ke->pfrke_net = ad->pfra_net;
946 ke->pfrke_not = ad->pfra_not;
947 return (ke);
948}
949
774 if (ke == NULL)
775 return (NULL);
776
777 if (ad->pfra_af == AF_INET)
778 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
779 else if (ad->pfra_af == AF_INET6)
780 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
781 ke->pfrke_af = ad->pfra_af;
782 ke->pfrke_net = ad->pfra_net;
783 ke->pfrke_not = ad->pfra_not;
784 return (ke);
785}
786
950void
787static void
951pfr_destroy_kentries(struct pfr_kentryworkq *workq)
952{
953 struct pfr_kentry *p, *q;
954
955 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
956 q = SLIST_NEXT(p, pfrke_workq);
957 pfr_destroy_kentry(p);
958 }
959}
960
788pfr_destroy_kentries(struct pfr_kentryworkq *workq)
789{
790 struct pfr_kentry *p, *q;
791
792 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
793 q = SLIST_NEXT(p, pfrke_workq);
794 pfr_destroy_kentry(p);
795 }
796}
797
961void
798static void
962pfr_destroy_kentry(struct pfr_kentry *ke)
963{
964 if (ke->pfrke_counters)
799pfr_destroy_kentry(struct pfr_kentry *ke)
800{
801 if (ke->pfrke_counters)
965#ifdef __FreeBSD__
966 pool_put(&V_pfr_kcounters_pl, ke->pfrke_counters);
967 pool_put(&V_pfr_kentry_pl, ke);
968#else
969 pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
970 pool_put(&pfr_kentry_pl, ke);
971#endif
802 uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
803 uma_zfree(V_pfr_kentry_z, ke);
972}
973
804}
805
974void
806static void
975pfr_insert_kentries(struct pfr_ktable *kt,
976 struct pfr_kentryworkq *workq, long tzero)
977{
978 struct pfr_kentry *p;
979 int rv, n = 0;
980
981 SLIST_FOREACH(p, workq, pfrke_workq) {
982 rv = pfr_route_kentry(kt, p);

--- 12 unchanged lines hidden ---

995pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
996{
997 struct pfr_kentry *p;
998 int rv;
999
1000 p = pfr_lookup_addr(kt, ad, 1);
1001 if (p != NULL)
1002 return (0);
807pfr_insert_kentries(struct pfr_ktable *kt,
808 struct pfr_kentryworkq *workq, long tzero)
809{
810 struct pfr_kentry *p;
811 int rv, n = 0;
812
813 SLIST_FOREACH(p, workq, pfrke_workq) {
814 rv = pfr_route_kentry(kt, p);

--- 12 unchanged lines hidden ---

827pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
828{
829 struct pfr_kentry *p;
830 int rv;
831
832 p = pfr_lookup_addr(kt, ad, 1);
833 if (p != NULL)
834 return (0);
1003 p = pfr_create_kentry(ad, 1);
835 p = pfr_create_kentry(ad);
1004 if (p == NULL)
1005 return (EINVAL);
1006
1007 rv = pfr_route_kentry(kt, p);
1008 if (rv)
1009 return (rv);
1010
1011 p->pfrke_tzero = tzero;
1012 kt->pfrkt_cnt++;
1013
1014 return (0);
1015}
1016
836 if (p == NULL)
837 return (EINVAL);
838
839 rv = pfr_route_kentry(kt, p);
840 if (rv)
841 return (rv);
842
843 p->pfrke_tzero = tzero;
844 kt->pfrkt_cnt++;
845
846 return (0);
847}
848
1017void
849static void
1018pfr_remove_kentries(struct pfr_ktable *kt,
1019 struct pfr_kentryworkq *workq)
1020{
1021 struct pfr_kentry *p;
1022 int n = 0;
1023
1024 SLIST_FOREACH(p, workq, pfrke_workq) {
1025 pfr_unroute_kentry(kt, p);
1026 n++;
1027 }
1028 kt->pfrkt_cnt -= n;
1029 pfr_destroy_kentries(workq);
1030}
1031
850pfr_remove_kentries(struct pfr_ktable *kt,
851 struct pfr_kentryworkq *workq)
852{
853 struct pfr_kentry *p;
854 int n = 0;
855
856 SLIST_FOREACH(p, workq, pfrke_workq) {
857 pfr_unroute_kentry(kt, p);
858 n++;
859 }
860 kt->pfrkt_cnt -= n;
861 pfr_destroy_kentries(workq);
862}
863
1032void
864static void
1033pfr_clean_node_mask(struct pfr_ktable *kt,
1034 struct pfr_kentryworkq *workq)
1035{
1036 struct pfr_kentry *p;
1037
1038 SLIST_FOREACH(p, workq, pfrke_workq)
1039 pfr_unroute_kentry(kt, p);
1040}
1041
865pfr_clean_node_mask(struct pfr_ktable *kt,
866 struct pfr_kentryworkq *workq)
867{
868 struct pfr_kentry *p;
869
870 SLIST_FOREACH(p, workq, pfrke_workq)
871 pfr_unroute_kentry(kt, p);
872}
873
1042void
874static void
1043pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
1044{
1045 struct pfr_kentry *p;
875pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
876{
877 struct pfr_kentry *p;
1046 int s;
1047
1048 SLIST_FOREACH(p, workq, pfrke_workq) {
878
879 SLIST_FOREACH(p, workq, pfrke_workq) {
1049 s = splsoftnet();
1050 if (negchange)
1051 p->pfrke_not = !p->pfrke_not;
1052 if (p->pfrke_counters) {
880 if (negchange)
881 p->pfrke_not = !p->pfrke_not;
882 if (p->pfrke_counters) {
1053#ifdef __FreeBSD__
1054 pool_put(&V_pfr_kcounters_pl, p->pfrke_counters);
1055#else
1056 pool_put(&pfr_kcounters_pl, p->pfrke_counters);
1057#endif
883 uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
1058 p->pfrke_counters = NULL;
1059 }
884 p->pfrke_counters = NULL;
885 }
1060 splx(s);
1061 p->pfrke_tzero = tzero;
1062 }
1063}
1064
886 p->pfrke_tzero = tzero;
887 }
888}
889
1065void
1066pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
890static void
891pfr_reset_feedback(struct pfr_addr *addr, int size)
1067{
892{
1068 struct pfr_addr ad;
893 struct pfr_addr *ad;
1069 int i;
1070
894 int i;
895
1071 for (i = 0; i < size; i++) {
1072 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1073 break;
1074 ad.pfra_fback = PFR_FB_NONE;
1075 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
1076 break;
1077 }
896 for (i = 0, ad = addr; i < size; i++, ad++)
897 ad->pfra_fback = PFR_FB_NONE;
1078}
1079
898}
899
1080void
900static void
1081pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1082{
1083 int i;
1084
1085 bzero(sa, sizeof(*sa));
1086 if (af == AF_INET) {
1087 sa->sin.sin_len = sizeof(sa->sin);
1088 sa->sin.sin_family = AF_INET;

--- 8 unchanged lines hidden ---

1097 break;
1098 }
1099 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1100 net -= 32;
1101 }
1102 }
1103}
1104
901pfr_prepare_network(union sockaddr_union *sa, int af, int net)
902{
903 int i;
904
905 bzero(sa, sizeof(*sa));
906 if (af == AF_INET) {
907 sa->sin.sin_len = sizeof(sa->sin);
908 sa->sin.sin_family = AF_INET;

--- 8 unchanged lines hidden ---

917 break;
918 }
919 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
920 net -= 32;
921 }
922 }
923}
924
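/*
 * [Editorial note, worked example, not part of either revision]
 * pfr_prepare_network() turns an (af, prefix length) pair into a
 * netmask sockaddr; for AF_INET6 with net == 64, the first two 32-bit
 * words of sin6_addr become 0xffffffff and the remaining words stay
 * zero, i.e. the mask ffff:ffff:ffff:ffff::.
 */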
1105int
925static int
1106pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1107{
1108 union sockaddr_union mask;
1109 struct radix_node *rn;
926pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
927{
928 union sockaddr_union mask;
929 struct radix_node *rn;
1110#ifdef __FreeBSD__
1111 struct radix_node_head *head = NULL;
930 struct radix_node_head *head = NULL;
1112#else
1113 struct radix_node_head *head;
1114#endif
1115 int s;
1116
1117 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
1118 if (ke->pfrke_af == AF_INET)
1119 head = kt->pfrkt_ip4;
1120 else if (ke->pfrke_af == AF_INET6)
1121 head = kt->pfrkt_ip6;
1122
931
932 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
933 if (ke->pfrke_af == AF_INET)
934 head = kt->pfrkt_ip4;
935 else if (ke->pfrke_af == AF_INET6)
936 head = kt->pfrkt_ip6;
937
1123 s = splsoftnet();
1124#ifdef __FreeBSD__
1125 PF_LOCK_ASSERT();
1126#endif
1127 if (KENTRY_NETWORK(ke)) {
1128 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
938 if (KENTRY_NETWORK(ke)) {
939 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1129#ifdef __FreeBSD__
1130 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
940 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1131#else
1132 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
1133#endif
1134 } else
941 } else
1135#ifdef __FreeBSD__
1136 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
942 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1137#else
1138 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
1139#endif
1140 splx(s);
1141
1142 return (rn == NULL ? -1 : 0);
1143}
1144
943
944 return (rn == NULL ? -1 : 0);
945}
946
1145int
947static int
1146pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1147{
1148 union sockaddr_union mask;
1149 struct radix_node *rn;
948pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
949{
950 union sockaddr_union mask;
951 struct radix_node *rn;
1150#ifdef __FreeBSD__
1151 struct radix_node_head *head = NULL;
952 struct radix_node_head *head = NULL;
1152#else
1153 struct radix_node_head *head;
1154#endif
1155 int s;
1156
1157 if (ke->pfrke_af == AF_INET)
1158 head = kt->pfrkt_ip4;
1159 else if (ke->pfrke_af == AF_INET6)
1160 head = kt->pfrkt_ip6;
1161
953
954 if (ke->pfrke_af == AF_INET)
955 head = kt->pfrkt_ip4;
956 else if (ke->pfrke_af == AF_INET6)
957 head = kt->pfrkt_ip6;
958
1162 s = splsoftnet();
1163#ifdef __FreeBSD__
1164 PF_LOCK_ASSERT();
1165#endif
1166 if (KENTRY_NETWORK(ke)) {
1167 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
959 if (KENTRY_NETWORK(ke)) {
960 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1168#ifdef __FreeBSD__
1169 rn = rn_delete(&ke->pfrke_sa, &mask, head);
961 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1170#else
1171 rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
1172#endif
1173 } else
962 } else
1174#ifdef __FreeBSD__
1175 rn = rn_delete(&ke->pfrke_sa, NULL, head);
963 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1176#else
1177 rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
1178#endif
1179 splx(s);
1180
1181 if (rn == NULL) {
1182 printf("pfr_unroute_kentry: delete failed.\n");
1183 return (-1);
1184 }
1185 return (0);
1186}
1187
964
965 if (rn == NULL) {
966 printf("pfr_unroute_kentry: delete failed.\n");
967 return (-1);
968 }
969 return (0);
970}
971
1188void
972static void
1189pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1190{
1191 bzero(ad, sizeof(*ad));
1192 if (ke == NULL)
1193 return;
1194 ad->pfra_af = ke->pfrke_af;
1195 ad->pfra_net = ke->pfrke_net;
1196 ad->pfra_not = ke->pfrke_not;
1197 if (ad->pfra_af == AF_INET)
1198 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1199 else if (ad->pfra_af == AF_INET6)
1200 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1201}
1202
973pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
974{
975 bzero(ad, sizeof(*ad));
976 if (ke == NULL)
977 return;
978 ad->pfra_af = ke->pfrke_af;
979 ad->pfra_net = ke->pfrke_net;
980 ad->pfra_not = ke->pfrke_not;
981 if (ad->pfra_af == AF_INET)
982 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
983 else if (ad->pfra_af == AF_INET6)
984 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
985}
986
1203int
987static int
1204pfr_walktree(struct radix_node *rn, void *arg)
1205{
1206 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1207 struct pfr_walktree *w = arg;
988pfr_walktree(struct radix_node *rn, void *arg)
989{
990 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
991 struct pfr_walktree *w = arg;
1208 int s, flags = w->pfrw_flags;
1209
1210 switch (w->pfrw_op) {
1211 case PFRW_MARK:
1212 ke->pfrke_mark = 0;
1213 break;
1214 case PFRW_SWEEP:
1215 if (ke->pfrke_mark)
1216 break;
1217 /* FALLTHROUGH */
1218 case PFRW_ENQUEUE:
1219 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1220 w->pfrw_cnt++;
1221 break;
1222 case PFRW_GET_ADDRS:
1223 if (w->pfrw_free-- > 0) {
992
993 switch (w->pfrw_op) {
994 case PFRW_MARK:
995 ke->pfrke_mark = 0;
996 break;
997 case PFRW_SWEEP:
998 if (ke->pfrke_mark)
999 break;
1000 /* FALLTHROUGH */
1001 case PFRW_ENQUEUE:
1002 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1003 w->pfrw_cnt++;
1004 break;
1005 case PFRW_GET_ADDRS:
1006 if (w->pfrw_free-- > 0) {
1224 struct pfr_addr ad;
1225
1226 pfr_copyout_addr(&ad, ke);
1227 if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1228 return (EFAULT);
1007 pfr_copyout_addr(w->pfrw_addr, ke);
1229 w->pfrw_addr++;
1230 }
1231 break;
1232 case PFRW_GET_ASTATS:
1233 if (w->pfrw_free-- > 0) {
1234 struct pfr_astats as;
1235
1236 pfr_copyout_addr(&as.pfras_a, ke);
1237
1008 w->pfrw_addr++;
1009 }
1010 break;
1011 case PFRW_GET_ASTATS:
1012 if (w->pfrw_free-- > 0) {
1013 struct pfr_astats as;
1014
1015 pfr_copyout_addr(&as.pfras_a, ke);
1016
1238 s = splsoftnet();
1239 if (ke->pfrke_counters) {
1240 bcopy(ke->pfrke_counters->pfrkc_packets,
1241 as.pfras_packets, sizeof(as.pfras_packets));
1242 bcopy(ke->pfrke_counters->pfrkc_bytes,
1243 as.pfras_bytes, sizeof(as.pfras_bytes));
1244 } else {
1245 bzero(as.pfras_packets, sizeof(as.pfras_packets));
1246 bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1247 as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1248 }
1017 if (ke->pfrke_counters) {
1018 bcopy(ke->pfrke_counters->pfrkc_packets,
1019 as.pfras_packets, sizeof(as.pfras_packets));
1020 bcopy(ke->pfrke_counters->pfrkc_bytes,
1021 as.pfras_bytes, sizeof(as.pfras_bytes));
1022 } else {
1023 bzero(as.pfras_packets, sizeof(as.pfras_packets));
1024 bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1025 as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1026 }
1249 splx(s);
1250 as.pfras_tzero = ke->pfrke_tzero;
1251
1027 as.pfras_tzero = ke->pfrke_tzero;
1028
1252 if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
1253 return (EFAULT);
1029 bcopy(&as, w->pfrw_astats, sizeof(as));
1254 w->pfrw_astats++;
1255 }
1256 break;
1257 case PFRW_POOL_GET:
1258 if (ke->pfrke_not)
1259 break; /* negative entries are ignored */
1260 if (!w->pfrw_cnt--) {
1261 w->pfrw_kentry = ke;
1262 return (1); /* finish search */
1263 }
1264 break;
1265 case PFRW_DYNADDR_UPDATE:
1030 w->pfrw_astats++;
1031 }
1032 break;
1033 case PFRW_POOL_GET:
1034 if (ke->pfrke_not)
1035 break; /* negative entries are ignored */
1036 if (!w->pfrw_cnt--) {
1037 w->pfrw_kentry = ke;
1038 return (1); /* finish search */
1039 }
1040 break;
1041 case PFRW_DYNADDR_UPDATE:
1042 {
1043 union sockaddr_union pfr_mask;
1044
1266 if (ke->pfrke_af == AF_INET) {
1267 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1268 break;
1045 if (ke->pfrke_af == AF_INET) {
1046 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1047 break;
1269#ifdef __FreeBSD__
1270 pfr_prepare_network(&V_pfr_mask, AF_INET, ke->pfrke_net);
1271#else
1272 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1048 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1273#endif
1274 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1275 &ke->pfrke_sa, AF_INET);
1276 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1277#ifdef __FreeBSD__
1278 &V_pfr_mask, AF_INET);
1279#else
1280 &pfr_mask, AF_INET);
1281#endif
1049 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1050 AF_INET);
1051 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1052 AF_INET);
1282 } else if (ke->pfrke_af == AF_INET6){
1283 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1284 break;
1053 } else if (ke->pfrke_af == AF_INET6){
1054 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1055 break;
1285#ifdef __FreeBSD__
1286 pfr_prepare_network(&V_pfr_mask, AF_INET6, ke->pfrke_net);
1287#else
1288 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1056 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1289#endif
1290 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1291 &ke->pfrke_sa, AF_INET6);
1292 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1293#ifdef __FreeBSD__
1294 &V_pfr_mask, AF_INET6);
1295#else
1296 &pfr_mask, AF_INET6);
1297#endif
1057 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1058 AF_INET6);
1059 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1060 AF_INET6);
1298 }
1299 break;
1061 }
1062 break;
1063 }
1300 }
1301 return (0);
1302}
1303
1304int
1305pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1306{
1307 struct pfr_ktableworkq workq;
1308 struct pfr_ktable *p;
1064 }
1065 return (0);
1066}
1067
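pfr_walktree() above is a single radix-tree visitor whose behaviour is selected by pfrw_op: the same callback clears marks, sweeps unmarked entries, enqueues entries on a work list, copies addresses or statistics out to the caller, picks the idx-th usable entry for a pool, or refreshes a dynamic address. The sketch below is a minimal user-space illustration of that one-callback, many-operations pattern over a plain array; every name in it is invented for the illustration and nothing in it is pf code.

#include <stdio.h>

enum demo_op { DEMO_MARK, DEMO_COUNT, DEMO_PRINT };

struct demo_entry {
	int	value;
	int	mark;
};

struct demo_walk {
	enum demo_op	op;	/* selects what the visitor does, like pfrw_op */
	int		cnt;	/* running counter, like pfrw_cnt */
};

/* One visitor dispatched on the op code, in the spirit of pfr_walktree(). */
static int
demo_visit(struct demo_entry *e, struct demo_walk *w)
{
	switch (w->op) {
	case DEMO_MARK:
		e->mark = 0;
		break;
	case DEMO_COUNT:
		w->cnt++;
		break;
	case DEMO_PRINT:
		printf("%d\n", e->value);
		break;
	}
	return (0);	/* a non-zero return would stop the walk early */
}

static void
demo_walk_all(struct demo_entry *e, int n, struct demo_walk *w)
{
	int i;

	for (i = 0; i < n; i++)
		if (demo_visit(&e[i], w))
			break;
}

int
main(void)
{
	struct demo_entry tab[] = { { 1, 1 }, { 2, 1 }, { 3, 1 } };
	struct demo_walk w = { DEMO_COUNT, 0 };

	demo_walk_all(tab, 3, &w);
	printf("counted %d entries\n", w.cnt);
	return (0);
}

The early-stop convention mirrors the PFRW_POOL_GET case above, which returns 1 from the callback once the requested entry has been found.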
1068int
1069pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1070{
1071 struct pfr_ktableworkq workq;
1072 struct pfr_ktable *p;
1309 int s, xdel = 0;
1073 int xdel = 0;
1310
1074
1311 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1312 PFR_FLAG_ALLRSETS);
1075 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1313 if (pfr_fix_anchor(filter->pfrt_anchor))
1314 return (EINVAL);
1315 if (pfr_table_count(filter, flags) < 0)
1316 return (ENOENT);
1317
1318 SLIST_INIT(&workq);
1319 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1320 if (pfr_skip_table(filter, p, flags))
1321 continue;
1322 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1323 continue;
1324 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1325 continue;
1326 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1327 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1328 xdel++;
1329 }
1076 if (pfr_fix_anchor(filter->pfrt_anchor))
1077 return (EINVAL);
1078 if (pfr_table_count(filter, flags) < 0)
1079 return (ENOENT);
1080
1081 SLIST_INIT(&workq);
1082 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1083 if (pfr_skip_table(filter, p, flags))
1084 continue;
1085 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1086 continue;
1087 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1088 continue;
1089 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1090 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1091 xdel++;
1092 }
1330 if (!(flags & PFR_FLAG_DUMMY)) {
1331 if (flags & PFR_FLAG_ATOMIC)
1332 s = splsoftnet();
1093 if (!(flags & PFR_FLAG_DUMMY))
1333 pfr_setflags_ktables(&workq);
1094 pfr_setflags_ktables(&workq);
1334 if (flags & PFR_FLAG_ATOMIC)
1335 splx(s);
1336 }
1337 if (ndel != NULL)
1338 *ndel = xdel;
1339 return (0);
1340}
1341
1342int
1343pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1344{
1345 struct pfr_ktableworkq addq, changeq;
1346 struct pfr_ktable *p, *q, *r, key;
1095 if (ndel != NULL)
1096 *ndel = xdel;
1097 return (0);
1098}
1099
1100int
1101pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1102{
1103 struct pfr_ktableworkq addq, changeq;
1104 struct pfr_ktable *p, *q, *r, key;
1347 int i, rv, s, xadd = 0;
1105 int i, rv, xadd = 0;
1348 long tzero = time_second;
1349
1106 long tzero = time_second;
1107
1350 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1108 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1351 SLIST_INIT(&addq);
1352 SLIST_INIT(&changeq);
1353 for (i = 0; i < size; i++) {
1109 SLIST_INIT(&addq);
1110 SLIST_INIT(&changeq);
1111 for (i = 0; i < size; i++) {
1354 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1355 senderr(EFAULT);
1112 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1356 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1357 flags & PFR_FLAG_USERIOCTL))
1358 senderr(EINVAL);
1359 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1360 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1361 if (p == NULL) {
1113 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1114 flags & PFR_FLAG_USERIOCTL))
1115 senderr(EINVAL);
1116 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1117 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1118 if (p == NULL) {
1362 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
1363 !(flags & PFR_FLAG_USERIOCTL));
1119 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1364 if (p == NULL)
1365 senderr(ENOMEM);
1366 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1367 if (!pfr_ktable_compare(p, q))
1368 goto _skip;
1369 }
1370 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1371 xadd++;

--- 9 unchanged lines hidden ---

1381 }
1382 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1383 if (!pfr_ktable_compare(&key, q)) {
1384 p->pfrkt_root = q;
1385 goto _skip;
1386 }
1387 }
1388 key.pfrkt_flags = 0;
1120 if (p == NULL)
1121 senderr(ENOMEM);
1122 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1123 if (!pfr_ktable_compare(p, q))
1124 goto _skip;
1125 }
1126 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1127 xadd++;

--- 9 unchanged lines hidden ---

1137 }
1138 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1139 if (!pfr_ktable_compare(&key, q)) {
1140 p->pfrkt_root = q;
1141 goto _skip;
1142 }
1143 }
1144 key.pfrkt_flags = 0;
1389 r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
1390 !(flags & PFR_FLAG_USERIOCTL));
1145 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1391 if (r == NULL)
1392 senderr(ENOMEM);
1393 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1394 p->pfrkt_root = r;
1395 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1396 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1397 if (!pfr_ktable_compare(&key, q))
1398 goto _skip;
1399 p->pfrkt_nflags = (p->pfrkt_flags &
1400 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1401 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1402 xadd++;
1403 }
1404_skip:
1405 ;
1406 }
1407 if (!(flags & PFR_FLAG_DUMMY)) {
1146 if (r == NULL)
1147 senderr(ENOMEM);
1148 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1149 p->pfrkt_root = r;
1150 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1151 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1152 if (!pfr_ktable_compare(&key, q))
1153 goto _skip;
1154 p->pfrkt_nflags = (p->pfrkt_flags &
1155 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1156 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1157 xadd++;
1158 }
1159_skip:
1160 ;
1161 }
1162 if (!(flags & PFR_FLAG_DUMMY)) {
1408 if (flags & PFR_FLAG_ATOMIC)
1409 s = splsoftnet();
1410 pfr_insert_ktables(&addq);
1411 pfr_setflags_ktables(&changeq);
1163 pfr_insert_ktables(&addq);
1164 pfr_setflags_ktables(&changeq);
1412 if (flags & PFR_FLAG_ATOMIC)
1413 splx(s);
1414 } else
1415 pfr_destroy_ktables(&addq, 0);
1416 if (nadd != NULL)
1417 *nadd = xadd;
1418 return (0);
1419_bad:
1420 pfr_destroy_ktables(&addq, 0);
1421 return (rv);
1422}
1423
1424int
1425pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1426{
1427 struct pfr_ktableworkq workq;
1428 struct pfr_ktable *p, *q, key;
1165 } else
1166 pfr_destroy_ktables(&addq, 0);
1167 if (nadd != NULL)
1168 *nadd = xadd;
1169 return (0);
1170_bad:
1171 pfr_destroy_ktables(&addq, 0);
1172 return (rv);
1173}
1174
1175int
1176pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1177{
1178 struct pfr_ktableworkq workq;
1179 struct pfr_ktable *p, *q, key;
1429 int i, s, xdel = 0;
1180 int i, xdel = 0;
1430
1181
1431 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1182 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1432 SLIST_INIT(&workq);
1433 for (i = 0; i < size; i++) {
1183 SLIST_INIT(&workq);
1184 for (i = 0; i < size; i++) {
1434 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1435 return (EFAULT);
1185 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1436 if (pfr_validate_table(&key.pfrkt_t, 0,
1437 flags & PFR_FLAG_USERIOCTL))
1438 return (EINVAL);
1439 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1440 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1441 SLIST_FOREACH(q, &workq, pfrkt_workq)
1442 if (!pfr_ktable_compare(p, q))
1443 goto _skip;
1444 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1445 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1446 xdel++;
1447 }
1448_skip:
1449 ;
1450 }
1451
1186 if (pfr_validate_table(&key.pfrkt_t, 0,
1187 flags & PFR_FLAG_USERIOCTL))
1188 return (EINVAL);
1189 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1190 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1191 SLIST_FOREACH(q, &workq, pfrkt_workq)
1192 if (!pfr_ktable_compare(p, q))
1193 goto _skip;
1194 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1195 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1196 xdel++;
1197 }
1198_skip:
1199 ;
1200 }
1201
1452 if (!(flags & PFR_FLAG_DUMMY)) {
1453 if (flags & PFR_FLAG_ATOMIC)
1454 s = splsoftnet();
1202 if (!(flags & PFR_FLAG_DUMMY))
1455 pfr_setflags_ktables(&workq);
1203 pfr_setflags_ktables(&workq);
1456 if (flags & PFR_FLAG_ATOMIC)
1457 splx(s);
1458 }
1459 if (ndel != NULL)
1460 *ndel = xdel;
1461 return (0);
1462}
1463
1464int
1465pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1466 int flags)
1467{
1468 struct pfr_ktable *p;
1469 int n, nn;
1470
1204 if (ndel != NULL)
1205 *ndel = xdel;
1206 return (0);
1207}
1208
1209int
1210pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1211 int flags)
1212{
1213 struct pfr_ktable *p;
1214 int n, nn;
1215
1216 PF_RULES_RASSERT();
1217
1471 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1472 if (pfr_fix_anchor(filter->pfrt_anchor))
1473 return (EINVAL);
1474 n = nn = pfr_table_count(filter, flags);
1475 if (n < 0)
1476 return (ENOENT);
1477 if (n > *size) {
1478 *size = n;
1479 return (0);
1480 }
1481 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1482 if (pfr_skip_table(filter, p, flags))
1483 continue;
1484 if (n-- <= 0)
1485 continue;
1218 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1219 if (pfr_fix_anchor(filter->pfrt_anchor))
1220 return (EINVAL);
1221 n = nn = pfr_table_count(filter, flags);
1222 if (n < 0)
1223 return (ENOENT);
1224 if (n > *size) {
1225 *size = n;
1226 return (0);
1227 }
1228 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1229 if (pfr_skip_table(filter, p, flags))
1230 continue;
1231 if (n-- <= 0)
1232 continue;
1486 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
1487 return (EFAULT);
1233 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1488 }
1234 }
1489 if (n) {
1490 printf("pfr_get_tables: corruption detected (%d).\n", n);
1491 return (ENOTTY);
1492 }
1235
1236 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1237
1493 *size = nn;
1494 return (0);
1495}
1496
1497int
1498pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1499 int flags)
1500{
1501 struct pfr_ktable *p;
1502 struct pfr_ktableworkq workq;
1238 *size = nn;
1239 return (0);
1240}
1241
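pfr_get_tables() above and pfr_get_tstats() just below share a size-negotiation convention: the caller passes in how many entries its buffer holds via *size; if that is too small, the function only writes the required count back through *size and returns success, otherwise it copies the entries out and reports how many were written. Below is a small user-space sketch of the same convention, with invented names and a fixed string table standing in for the kernel's red-black tree.

#include <stdio.h>

static const char *demo_tables[] = { "goodguys", "spammers", "sshguard" };
#define	DEMO_NTABLES	(int)(sizeof(demo_tables) / sizeof(demo_tables[0]))

/*
 * If *size is too small, report the required count through *size and copy
 * nothing, mirroring the "if (n > *size) { *size = n; return (0); }" path
 * in pfr_get_tables().  Otherwise fill the buffer and set *size to the
 * number of entries written.
 */
static int
demo_get_tables(char buf[][32], int *size)
{
	int i, n = DEMO_NTABLES;

	if (n > *size) {
		*size = n;
		return (0);
	}
	for (i = 0; i < n; i++)
		snprintf(buf[i], sizeof(buf[i]), "%s", demo_tables[i]);
	*size = n;
	return (0);
}

int
main(void)
{
	char buf[8][32];
	int i, size = 0;

	demo_get_tables(NULL, &size);	/* first call: learn the count */
	printf("need room for %d tables\n", size);

	size = 8;
	demo_get_tables(buf, &size);	/* second call: fetch the entries */
	for (i = 0; i < size; i++)
		printf("%s\n", buf[i]);
	return (0);
}

Note that the new revision replaces the old printf-and-ENOTTY consistency check on n with a KASSERT(n == 0, ...), but the negotiation itself is unchanged.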
1242int
1243pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1244 int flags)
1245{
1246 struct pfr_ktable *p;
1247 struct pfr_ktableworkq workq;
1503 int s, n, nn;
1248 int n, nn;
1504 long tzero = time_second;
1505
1506 /* XXX PFR_FLAG_CLSTATS disabled */
1249 long tzero = time_second;
1250
1251 /* XXX PFR_FLAG_CLSTATS disabled */
1507 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
1252 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1508 if (pfr_fix_anchor(filter->pfrt_anchor))
1509 return (EINVAL);
1510 n = nn = pfr_table_count(filter, flags);
1511 if (n < 0)
1512 return (ENOENT);
1513 if (n > *size) {
1514 *size = n;
1515 return (0);
1516 }
1517 SLIST_INIT(&workq);
1253 if (pfr_fix_anchor(filter->pfrt_anchor))
1254 return (EINVAL);
1255 n = nn = pfr_table_count(filter, flags);
1256 if (n < 0)
1257 return (ENOENT);
1258 if (n > *size) {
1259 *size = n;
1260 return (0);
1261 }
1262 SLIST_INIT(&workq);
1518 if (flags & PFR_FLAG_ATOMIC)
1519 s = splsoftnet();
1520 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1521 if (pfr_skip_table(filter, p, flags))
1522 continue;
1523 if (n-- <= 0)
1524 continue;
1263 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1264 if (pfr_skip_table(filter, p, flags))
1265 continue;
1266 if (n-- <= 0)
1267 continue;
1525 if (!(flags & PFR_FLAG_ATOMIC))
1526 s = splsoftnet();
1527 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
1528 splx(s);
1529 return (EFAULT);
1530 }
1531 if (!(flags & PFR_FLAG_ATOMIC))
1532 splx(s);
1268 bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
1533 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1534 }
1535 if (flags & PFR_FLAG_CLSTATS)
1536 pfr_clstats_ktables(&workq, tzero,
1537 flags & PFR_FLAG_ADDRSTOO);
1269 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1270 }
1271 if (flags & PFR_FLAG_CLSTATS)
1272 pfr_clstats_ktables(&workq, tzero,
1273 flags & PFR_FLAG_ADDRSTOO);
1538 if (flags & PFR_FLAG_ATOMIC)
1539 splx(s);
1540 if (n) {
1541 printf("pfr_get_tstats: corruption detected (%d).\n", n);
1542 return (ENOTTY);
1543 }
1274
1275 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1276
1544 *size = nn;
1545 return (0);
1546}
1547
1548int
1549pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1550{
1551 struct pfr_ktableworkq workq;
1552 struct pfr_ktable *p, key;
1277 *size = nn;
1278 return (0);
1279}
1280
1281int
1282pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1283{
1284 struct pfr_ktableworkq workq;
1285 struct pfr_ktable *p, key;
1553 int i, s, xzero = 0;
1286 int i, xzero = 0;
1554 long tzero = time_second;
1555
1287 long tzero = time_second;
1288
1556 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1557 PFR_FLAG_ADDRSTOO);
1289 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1558 SLIST_INIT(&workq);
1559 for (i = 0; i < size; i++) {
1290 SLIST_INIT(&workq);
1291 for (i = 0; i < size; i++) {
1560 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1561 return (EFAULT);
1292 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1562 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1563 return (EINVAL);
1564 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1565 if (p != NULL) {
1566 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1567 xzero++;
1568 }
1569 }
1293 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1294 return (EINVAL);
1295 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1296 if (p != NULL) {
1297 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1298 xzero++;
1299 }
1300 }
1570 if (!(flags & PFR_FLAG_DUMMY)) {
1571 if (flags & PFR_FLAG_ATOMIC)
1572 s = splsoftnet();
1301 if (!(flags & PFR_FLAG_DUMMY))
1573 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1302 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1574 if (flags & PFR_FLAG_ATOMIC)
1575 splx(s);
1576 }
1577 if (nzero != NULL)
1578 *nzero = xzero;
1579 return (0);
1580}
1581
1582int
1583pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1584 int *nchange, int *ndel, int flags)
1585{
1586 struct pfr_ktableworkq workq;
1587 struct pfr_ktable *p, *q, key;
1303 if (nzero != NULL)
1304 *nzero = xzero;
1305 return (0);
1306}
1307
1308int
1309pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1310 int *nchange, int *ndel, int flags)
1311{
1312 struct pfr_ktableworkq workq;
1313 struct pfr_ktable *p, *q, key;
1588 int i, s, xchange = 0, xdel = 0;
1314 int i, xchange = 0, xdel = 0;
1589
1315
1590 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1316 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1591 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1592 (clrflag & ~PFR_TFLAG_USRMASK) ||
1593 (setflag & clrflag))
1594 return (EINVAL);
1595 SLIST_INIT(&workq);
1596 for (i = 0; i < size; i++) {
1317 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1318 (clrflag & ~PFR_TFLAG_USRMASK) ||
1319 (setflag & clrflag))
1320 return (EINVAL);
1321 SLIST_INIT(&workq);
1322 for (i = 0; i < size; i++) {
1597 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1598 return (EFAULT);
1323 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1599 if (pfr_validate_table(&key.pfrkt_t, 0,
1600 flags & PFR_FLAG_USERIOCTL))
1601 return (EINVAL);
1602 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1603 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1604 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1605 ~clrflag;
1606 if (p->pfrkt_nflags == p->pfrkt_flags)

--- 7 unchanged lines hidden ---

1614 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1615 xdel++;
1616 else
1617 xchange++;
1618 }
1619_skip:
1620 ;
1621 }
1324 if (pfr_validate_table(&key.pfrkt_t, 0,
1325 flags & PFR_FLAG_USERIOCTL))
1326 return (EINVAL);
1327 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1328 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1329 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1330 ~clrflag;
1331 if (p->pfrkt_nflags == p->pfrkt_flags)

--- 7 unchanged lines hidden ---

1339 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1340 xdel++;
1341 else
1342 xchange++;
1343 }
1344_skip:
1345 ;
1346 }
1622 if (!(flags & PFR_FLAG_DUMMY)) {
1623 if (flags & PFR_FLAG_ATOMIC)
1624 s = splsoftnet();
1347 if (!(flags & PFR_FLAG_DUMMY))
1625 pfr_setflags_ktables(&workq);
1348 pfr_setflags_ktables(&workq);
1626 if (flags & PFR_FLAG_ATOMIC)
1627 splx(s);
1628 }
1629 if (nchange != NULL)
1630 *nchange = xchange;
1631 if (ndel != NULL)
1632 *ndel = xdel;
1633 return (0);
1634}
1635
1636int

--- 32 unchanged lines hidden ---

1669int
1670pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1671 int *nadd, int *naddr, u_int32_t ticket, int flags)
1672{
1673 struct pfr_ktableworkq tableq;
1674 struct pfr_kentryworkq addrq;
1675 struct pfr_ktable *kt, *rt, *shadow, key;
1676 struct pfr_kentry *p;
1349 if (nchange != NULL)
1350 *nchange = xchange;
1351 if (ndel != NULL)
1352 *ndel = xdel;
1353 return (0);
1354}
1355
1356int

--- 32 unchanged lines hidden ---

1389int
1390pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1391 int *nadd, int *naddr, u_int32_t ticket, int flags)
1392{
1393 struct pfr_ktableworkq tableq;
1394 struct pfr_kentryworkq addrq;
1395 struct pfr_ktable *kt, *rt, *shadow, key;
1396 struct pfr_kentry *p;
1677 struct pfr_addr ad;
1397 struct pfr_addr *ad;
1678 struct pf_ruleset *rs;
1679 int i, rv, xadd = 0, xaddr = 0;
1680
1398 struct pf_ruleset *rs;
1399 int i, rv, xadd = 0, xaddr = 0;
1400
1401 PF_RULES_WASSERT();
1402
1681 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1682 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1683 return (EINVAL);
1684 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1685 flags & PFR_FLAG_USERIOCTL))
1686 return (EINVAL);
1687 rs = pf_find_ruleset(tbl->pfrt_anchor);
1688 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1689 return (EBUSY);
1690 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1691 SLIST_INIT(&tableq);
1692 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1693 if (kt == NULL) {
1403 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1404 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1405 return (EINVAL);
1406 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1407 flags & PFR_FLAG_USERIOCTL))
1408 return (EINVAL);
1409 rs = pf_find_ruleset(tbl->pfrt_anchor);
1410 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1411 return (EBUSY);
1412 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1413 SLIST_INIT(&tableq);
1414 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1415 if (kt == NULL) {
1694 kt = pfr_create_ktable(tbl, 0, 1,
1695 !(flags & PFR_FLAG_USERIOCTL));
1416 kt = pfr_create_ktable(tbl, 0, 1);
1696 if (kt == NULL)
1697 return (ENOMEM);
1698 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1699 xadd++;
1700 if (!tbl->pfrt_anchor[0])
1701 goto _skip;
1702
1703 /* find or create root table */
1704 bzero(&key, sizeof(key));
1705 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1706 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1707 if (rt != NULL) {
1708 kt->pfrkt_root = rt;
1709 goto _skip;
1710 }
1417 if (kt == NULL)
1418 return (ENOMEM);
1419 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1420 xadd++;
1421 if (!tbl->pfrt_anchor[0])
1422 goto _skip;
1423
1424 /* find or create root table */
1425 bzero(&key, sizeof(key));
1426 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1427 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1428 if (rt != NULL) {
1429 kt->pfrkt_root = rt;
1430 goto _skip;
1431 }
1711 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
1712 !(flags & PFR_FLAG_USERIOCTL));
1432 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1713 if (rt == NULL) {
1714 pfr_destroy_ktables(&tableq, 0);
1715 return (ENOMEM);
1716 }
1717 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1718 kt->pfrkt_root = rt;
1719 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1720 xadd++;
1721_skip:
1433 if (rt == NULL) {
1434 pfr_destroy_ktables(&tableq, 0);
1435 return (ENOMEM);
1436 }
1437 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1438 kt->pfrkt_root = rt;
1439 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1440 xadd++;
1441_skip:
1722 shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
1442 shadow = pfr_create_ktable(tbl, 0, 0);
1723 if (shadow == NULL) {
1724 pfr_destroy_ktables(&tableq, 0);
1725 return (ENOMEM);
1726 }
1727 SLIST_INIT(&addrq);
1443 if (shadow == NULL) {
1444 pfr_destroy_ktables(&tableq, 0);
1445 return (ENOMEM);
1446 }
1447 SLIST_INIT(&addrq);
1728 for (i = 0; i < size; i++) {
1729 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1730 senderr(EFAULT);
1731 if (pfr_validate_addr(&ad))
1448 for (i = 0, ad = addr; i < size; i++, ad++) {
1449 if (pfr_validate_addr(ad))
1732 senderr(EINVAL);
1450 senderr(EINVAL);
1733 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1451 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1734 continue;
1452 continue;
1735 p = pfr_create_kentry(&ad, 0);
1453 p = pfr_create_kentry(ad);
1736 if (p == NULL)
1737 senderr(ENOMEM);
1738 if (pfr_route_kentry(shadow, p)) {
1739 pfr_destroy_kentry(p);
1740 continue;
1741 }
1742 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1743 xaddr++;

--- 27 unchanged lines hidden (view full) ---

1771int
1772pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1773{
1774 struct pfr_ktableworkq workq;
1775 struct pfr_ktable *p;
1776 struct pf_ruleset *rs;
1777 int xdel = 0;
1778
1454 if (p == NULL)
1455 senderr(ENOMEM);
1456 if (pfr_route_kentry(shadow, p)) {
1457 pfr_destroy_kentry(p);
1458 continue;
1459 }
1460 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1461 xaddr++;

--- 27 unchanged lines hidden (view full) ---

1489int
1490pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1491{
1492 struct pfr_ktableworkq workq;
1493 struct pfr_ktable *p;
1494 struct pf_ruleset *rs;
1495 int xdel = 0;
1496
1497 PF_RULES_WASSERT();
1498
1779 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1780 rs = pf_find_ruleset(trs->pfrt_anchor);
1781 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1782 return (0);
1783 SLIST_INIT(&workq);
1784 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1785 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1786 pfr_skip_table(trs, p, 0))

--- 14 unchanged lines hidden (view full) ---

1801
1802int
1803pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1804 int *nchange, int flags)
1805{
1806 struct pfr_ktable *p, *q;
1807 struct pfr_ktableworkq workq;
1808 struct pf_ruleset *rs;
1499 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1500 rs = pf_find_ruleset(trs->pfrt_anchor);
1501 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1502 return (0);
1503 SLIST_INIT(&workq);
1504 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1505 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1506 pfr_skip_table(trs, p, 0))

--- 14 unchanged lines hidden (view full) ---

1521
1522int
1523pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1524 int *nchange, int flags)
1525{
1526 struct pfr_ktable *p, *q;
1527 struct pfr_ktableworkq workq;
1528 struct pf_ruleset *rs;
1809 int s, xadd = 0, xchange = 0;
1529 int xadd = 0, xchange = 0;
1810 long tzero = time_second;
1811
1530 long tzero = time_second;
1531
1812 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1532 PF_RULES_WASSERT();
1533
1534 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1813 rs = pf_find_ruleset(trs->pfrt_anchor);
1814 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1815 return (EBUSY);
1816
1817 SLIST_INIT(&workq);
1818 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1819 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1820 pfr_skip_table(trs, p, 0))
1821 continue;
1822 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1823 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1824 xchange++;
1825 else
1826 xadd++;
1827 }
1828
1829 if (!(flags & PFR_FLAG_DUMMY)) {
1535 rs = pf_find_ruleset(trs->pfrt_anchor);
1536 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1537 return (EBUSY);
1538
1539 SLIST_INIT(&workq);
1540 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1541 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1542 pfr_skip_table(trs, p, 0))
1543 continue;
1544 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1545 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1546 xchange++;
1547 else
1548 xadd++;
1549 }
1550
1551 if (!(flags & PFR_FLAG_DUMMY)) {
1830 if (flags & PFR_FLAG_ATOMIC)
1831 s = splsoftnet();
1832 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1833 q = SLIST_NEXT(p, pfrkt_workq);
1834 pfr_commit_ktable(p, tzero);
1835 }
1552 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1553 q = SLIST_NEXT(p, pfrkt_workq);
1554 pfr_commit_ktable(p, tzero);
1555 }
1836 if (flags & PFR_FLAG_ATOMIC)
1837 splx(s);
1838 rs->topen = 0;
1839 pf_remove_if_empty_ruleset(rs);
1840 }
1841 if (nadd != NULL)
1842 *nadd = xadd;
1843 if (nchange != NULL)
1844 *nchange = xchange;
1845
1846 return (0);
1847}
1848
1556 rs->topen = 0;
1557 pf_remove_if_empty_ruleset(rs);
1558 }
1559 if (nadd != NULL)
1560 *nadd = xadd;
1561 if (nchange != NULL)
1562 *nchange = xchange;
1563
1564 return (0);
1565}
1566
1849void
1567static void
1850pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1851{
1852 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1853 int nflags;
1854
1568pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1569{
1570 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1571 int nflags;
1572
1573 PF_RULES_WASSERT();
1574
1855 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1856 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1857 pfr_clstats_ktable(kt, tzero, 1);
1858 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1859 /* kt might contain addresses */
1860 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1861 struct pfr_kentry *p, *q, *next;
1862 struct pfr_addr ad;

--- 37 unchanged lines hidden ---

1900 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1901 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1902 & ~PFR_TFLAG_INACTIVE;
1903 pfr_destroy_ktable(shadow, 0);
1904 kt->pfrkt_shadow = NULL;
1905 pfr_setflags_ktable(kt, nflags);
1906}
1907
1575 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1576 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1577 pfr_clstats_ktable(kt, tzero, 1);
1578 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1579 /* kt might contain addresses */
1580 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1581 struct pfr_kentry *p, *q, *next;
1582 struct pfr_addr ad;

--- 37 unchanged lines hidden ---

1620 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1621 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1622 & ~PFR_TFLAG_INACTIVE;
1623 pfr_destroy_ktable(shadow, 0);
1624 kt->pfrkt_shadow = NULL;
1625 pfr_setflags_ktable(kt, nflags);
1626}
1627
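Taken together, pfr_ina_define() stages a replacement address set in a shadow table hanging off the real one (guarded by the ruleset ticket), pfr_ina_rollback() throws the shadow away, and pfr_ina_commit() with pfr_commit_ktable() above makes the staged contents visible in one step while the lock is held. The sketch below is a deliberately simplified user-space illustration of that shadow-and-commit idea; every name is invented, and the wholesale pointer swap stands in for the entry-by-entry reconciliation (addq/changeq/delq/garbageq) that pfr_commit_ktable() actually performs.

#include <stdio.h>
#include <stddef.h>

struct demo_table {
	const char	**live;		/* what lookups currently see */
	size_t		nlive;
	const char	**shadow;	/* staged replacement, NULL if none */
	size_t		nshadow;
};

/* Stage a replacement set without disturbing current lookups. */
static void
demo_ina_define(struct demo_table *t, const char **addrs, size_t n)
{
	t->shadow = addrs;
	t->nshadow = n;
}

/* Drop the staged set. */
static void
demo_ina_rollback(struct demo_table *t)
{
	t->shadow = NULL;
	t->nshadow = 0;
}

/*
 * Publish the staged set.  The kernel code additionally reconciles and
 * frees the old entries; the sketch simply lets them go.
 */
static void
demo_ina_commit(struct demo_table *t)
{
	if (t->shadow == NULL)
		return;
	t->live = t->shadow;
	t->nlive = t->nshadow;
	t->shadow = NULL;
	t->nshadow = 0;
}

int
main(void)
{
	static const char *v1[] = { "192.0.2.1" };
	static const char *v2[] = { "192.0.2.1", "198.51.100.0/24" };
	struct demo_table t = { v1, 1, NULL, 0 };
	size_t i;

	demo_ina_define(&t, v2, 2);	/* lookups still see v1 here */
	demo_ina_commit(&t);		/* switch to v2 */
	demo_ina_define(&t, v1, 1);	/* stage another change ... */
	demo_ina_rollback(&t);		/* ... and abandon it */
	for (i = 0; i < t.nlive; i++)	/* still prints the v2 contents */
		printf("%s\n", t.live[i]);
	return (0);
}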
1908int
1628static int
1909pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1910{
1911 int i;
1912
1913 if (!tbl->pfrt_name[0])
1914 return (-1);
1915 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1916 return (-1);

--- 8 unchanged lines hidden ---

1925 return (-1);
1926 return (0);
1927}
1928
1929/*
1930 * Rewrite anchors referenced by tables to remove slashes
1931 * and check for validity.
1932 */
1629pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1630{
1631 int i;
1632
1633 if (!tbl->pfrt_name[0])
1634 return (-1);
1635 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1636 return (-1);

--- 8 unchanged lines hidden ---

1645 return (-1);
1646 return (0);
1647}
1648
1649/*
1650 * Rewrite anchors referenced by tables to remove slashes
1651 * and check for validity.
1652 */
1933int
1653static int
1934pfr_fix_anchor(char *anchor)
1935{
1936 size_t siz = MAXPATHLEN;
1937 int i;
1938
1939 if (anchor[0] == '/') {
1940 char *path;
1941 int off;

--- 8 unchanged lines hidden ---

1950 if (anchor[siz - 1])
1951 return (-1);
1952 for (i = strlen(anchor); i < siz; i++)
1953 if (anchor[i])
1954 return (-1);
1955 return (0);
1956}
1957
1654pfr_fix_anchor(char *anchor)
1655{
1656 size_t siz = MAXPATHLEN;
1657 int i;
1658
1659 if (anchor[0] == '/') {
1660 char *path;
1661 int off;

--- 8 unchanged lines hidden ---

1670 if (anchor[siz - 1])
1671 return (-1);
1672 for (i = strlen(anchor); i < siz; i++)
1673 if (anchor[i])
1674 return (-1);
1675 return (0);
1676}
1677
1958int
1678static int
1959pfr_table_count(struct pfr_table *filter, int flags)
1960{
1961 struct pf_ruleset *rs;
1962
1679pfr_table_count(struct pfr_table *filter, int flags)
1680{
1681 struct pf_ruleset *rs;
1682
1683 PF_RULES_ASSERT();
1684
1963 if (flags & PFR_FLAG_ALLRSETS)
1964 return (pfr_ktable_cnt);
1965 if (filter->pfrt_anchor[0]) {
1966 rs = pf_find_ruleset(filter->pfrt_anchor);
1967 return ((rs != NULL) ? rs->tables : -1);
1968 }
1969 return (pf_main_ruleset.tables);
1970}
1971
1685 if (flags & PFR_FLAG_ALLRSETS)
1686 return (pfr_ktable_cnt);
1687 if (filter->pfrt_anchor[0]) {
1688 rs = pf_find_ruleset(filter->pfrt_anchor);
1689 return ((rs != NULL) ? rs->tables : -1);
1690 }
1691 return (pf_main_ruleset.tables);
1692}
1693
1972int
1694static int
1973pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1974{
1975 if (flags & PFR_FLAG_ALLRSETS)
1976 return (0);
1977 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1978 return (1);
1979 return (0);
1980}
1981
1695pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1696{
1697 if (flags & PFR_FLAG_ALLRSETS)
1698 return (0);
1699 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1700 return (1);
1701 return (0);
1702}
1703
1982void
1704static void
1983pfr_insert_ktables(struct pfr_ktableworkq *workq)
1984{
1985 struct pfr_ktable *p;
1986
1987 SLIST_FOREACH(p, workq, pfrkt_workq)
1988 pfr_insert_ktable(p);
1989}
1990
1705pfr_insert_ktables(struct pfr_ktableworkq *workq)
1706{
1707 struct pfr_ktable *p;
1708
1709 SLIST_FOREACH(p, workq, pfrkt_workq)
1710 pfr_insert_ktable(p);
1711}
1712
1991void
1713static void
1992pfr_insert_ktable(struct pfr_ktable *kt)
1993{
1714pfr_insert_ktable(struct pfr_ktable *kt)
1715{
1716
1717 PF_RULES_WASSERT();
1718
1994 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1995 pfr_ktable_cnt++;
1996 if (kt->pfrkt_root != NULL)
1997 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1998 pfr_setflags_ktable(kt->pfrkt_root,
1999 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
2000}
2001
1719 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1720 pfr_ktable_cnt++;
1721 if (kt->pfrkt_root != NULL)
1722 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1723 pfr_setflags_ktable(kt->pfrkt_root,
1724 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1725}
1726
2002void
1727static void
2003pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2004{
2005 struct pfr_ktable *p, *q;
2006
2007 for (p = SLIST_FIRST(workq); p; p = q) {
2008 q = SLIST_NEXT(p, pfrkt_workq);
2009 pfr_setflags_ktable(p, p->pfrkt_nflags);
2010 }
2011}
2012
1728pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1729{
1730 struct pfr_ktable *p, *q;
1731
1732 for (p = SLIST_FIRST(workq); p; p = q) {
1733 q = SLIST_NEXT(p, pfrkt_workq);
1734 pfr_setflags_ktable(p, p->pfrkt_nflags);
1735 }
1736}
1737
2013void
1738static void
2014pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
2015{
2016 struct pfr_kentryworkq addrq;
2017
1739pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1740{
1741 struct pfr_kentryworkq addrq;
1742
1743 PF_RULES_WASSERT();
1744
2018 if (!(newf & PFR_TFLAG_REFERENCED) &&
2019 !(newf & PFR_TFLAG_PERSIST))
2020 newf &= ~PFR_TFLAG_ACTIVE;
2021 if (!(newf & PFR_TFLAG_ACTIVE))
2022 newf &= ~PFR_TFLAG_USRMASK;
2023 if (!(newf & PFR_TFLAG_SETMASK)) {
2024 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
2025 if (kt->pfrkt_root != NULL)

--- 11 unchanged lines hidden ---

2037 }
2038 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
2039 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
2040 kt->pfrkt_shadow = NULL;
2041 }
2042 kt->pfrkt_flags = newf;
2043}
2044
1745 if (!(newf & PFR_TFLAG_REFERENCED) &&
1746 !(newf & PFR_TFLAG_PERSIST))
1747 newf &= ~PFR_TFLAG_ACTIVE;
1748 if (!(newf & PFR_TFLAG_ACTIVE))
1749 newf &= ~PFR_TFLAG_USRMASK;
1750 if (!(newf & PFR_TFLAG_SETMASK)) {
1751 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1752 if (kt->pfrkt_root != NULL)

--- 11 unchanged lines hidden ---

1764 }
1765 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1766 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1767 kt->pfrkt_shadow = NULL;
1768 }
1769 kt->pfrkt_flags = newf;
1770}
1771
2045void
1772static void
2046pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
2047{
2048 struct pfr_ktable *p;
2049
2050 SLIST_FOREACH(p, workq, pfrkt_workq)
2051 pfr_clstats_ktable(p, tzero, recurse);
2052}
2053
1773pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1774{
1775 struct pfr_ktable *p;
1776
1777 SLIST_FOREACH(p, workq, pfrkt_workq)
1778 pfr_clstats_ktable(p, tzero, recurse);
1779}
1780
2054void
1781static void
2055pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
2056{
2057 struct pfr_kentryworkq addrq;
1782pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1783{
1784 struct pfr_kentryworkq addrq;
2058 int s;
2059
2060 if (recurse) {
2061 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2062 pfr_clstats_kentries(&addrq, tzero, 0);
2063 }
1785
1786 if (recurse) {
1787 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1788 pfr_clstats_kentries(&addrq, tzero, 0);
1789 }
2064 s = splsoftnet();
2065 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
2066 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
2067 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1790 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1791 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1792 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
2068 splx(s);
2069 kt->pfrkt_tzero = tzero;
2070}
2071
1793 kt->pfrkt_tzero = tzero;
1794}
1795
2072struct pfr_ktable *
2073pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset,
2074 int intr)
1796static struct pfr_ktable *
1797pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
2075{
2076 struct pfr_ktable *kt;
2077 struct pf_ruleset *rs;
2078
1798{
1799 struct pfr_ktable *kt;
1800 struct pf_ruleset *rs;
1801
2079#ifdef __FreeBSD__
2080 kt = pool_get(&V_pfr_ktable_pl, PR_NOWAIT|PR_ZERO);
2081#else
2082 if (intr)
2083 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
2084 else
2085 kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
2086#endif
1802 PF_RULES_WASSERT();
1803
1804 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
2087 if (kt == NULL)
2088 return (NULL);
2089 kt->pfrkt_t = *tbl;
2090
2091 if (attachruleset) {
2092 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
2093 if (!rs) {
2094 pfr_destroy_ktable(kt, 0);

--- 10 unchanged lines hidden ---

2105 pfr_destroy_ktable(kt, 0);
2106 return (NULL);
2107 }
2108 kt->pfrkt_tzero = tzero;
2109
2110 return (kt);
2111}
2112
1805 if (kt == NULL)
1806 return (NULL);
1807 kt->pfrkt_t = *tbl;
1808
1809 if (attachruleset) {
1810 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1811 if (!rs) {
1812 pfr_destroy_ktable(kt, 0);

--- 10 unchanged lines hidden ---

1823 pfr_destroy_ktable(kt, 0);
1824 return (NULL);
1825 }
1826 kt->pfrkt_tzero = tzero;
1827
1828 return (kt);
1829}
1830
2113void
1831static void
2114pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2115{
2116 struct pfr_ktable *p, *q;
2117
2118 for (p = SLIST_FIRST(workq); p; p = q) {
2119 q = SLIST_NEXT(p, pfrkt_workq);
2120 pfr_destroy_ktable(p, flushaddr);
2121 }
2122}
2123
1832pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1833{
1834 struct pfr_ktable *p, *q;
1835
1836 for (p = SLIST_FIRST(workq); p; p = q) {
1837 q = SLIST_NEXT(p, pfrkt_workq);
1838 pfr_destroy_ktable(p, flushaddr);
1839 }
1840}
1841
2124void
1842static void
2125pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2126{
2127 struct pfr_kentryworkq addrq;
2128
2129 if (flushaddr) {
2130 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2131 pfr_clean_node_mask(kt, &addrq);
2132 pfr_destroy_kentries(&addrq);
2133 }
1843pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1844{
1845 struct pfr_kentryworkq addrq;
1846
1847 if (flushaddr) {
1848 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1849 pfr_clean_node_mask(kt, &addrq);
1850 pfr_destroy_kentries(&addrq);
1851 }
2134#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
2135 if (kt->pfrkt_ip4 != NULL) {
2136 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
2137 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2138 }
2139 if (kt->pfrkt_ip6 != NULL) {
2140 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
2141 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2142 }
1852 if (kt->pfrkt_ip4 != NULL) {
1853 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
1854 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1855 }
1856 if (kt->pfrkt_ip6 != NULL) {
1857 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
1858 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1859 }
2143#else
2144 if (kt->pfrkt_ip4 != NULL)
2145 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2146 if (kt->pfrkt_ip6 != NULL)
2147 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2148#endif
2149 if (kt->pfrkt_shadow != NULL)
2150 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2151 if (kt->pfrkt_rs != NULL) {
2152 kt->pfrkt_rs->tables--;
2153 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
2154 }
1860 if (kt->pfrkt_shadow != NULL)
1861 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1862 if (kt->pfrkt_rs != NULL) {
1863 kt->pfrkt_rs->tables--;
1864 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1865 }
2155#ifdef __FreeBSD__
2156 pool_put(&V_pfr_ktable_pl, kt);
2157#else
2158 pool_put(&pfr_ktable_pl, kt);
2159#endif
1866 free(kt, M_PFTABLE);
2160}
2161
1867}
1868
2162int
1869static int
2163pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2164{
2165 int d;
2166
2167 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2168 return (d);
2169 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2170}
2171
1870pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1871{
1872 int d;
1873
1874 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1875 return (d);
1876 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1877}
1878
2172struct pfr_ktable *
1879static struct pfr_ktable *
2173pfr_lookup_table(struct pfr_table *tbl)
2174{
2175 /* struct pfr_ktable start like a struct pfr_table */
2176 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2177 (struct pfr_ktable *)tbl));
2178}
2179
2180int
2181pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2182{
2183 struct pfr_kentry *ke = NULL;
2184 int match;
2185
1880pfr_lookup_table(struct pfr_table *tbl)
1881{
1882 /* struct pfr_ktable start like a struct pfr_table */
1883 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1884 (struct pfr_ktable *)tbl));
1885}
1886
1887int
1888pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1889{
1890 struct pfr_kentry *ke = NULL;
1891 int match;
1892
1893 PF_RULES_RASSERT();
1894
2186 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2187 kt = kt->pfrkt_root;
2188 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2189 return (0);
2190
2191 switch (af) {
2192#ifdef INET
2193 case AF_INET:
1895 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1896 kt = kt->pfrkt_root;
1897 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1898 return (0);
1899
1900 switch (af) {
1901#ifdef INET
1902 case AF_INET:
2194#ifdef __FreeBSD__
2195 V_pfr_sin.sin_addr.s_addr = a->addr32[0];
2196 ke = (struct pfr_kentry *)rn_match(&V_pfr_sin, kt->pfrkt_ip4);
2197#else
2198 pfr_sin.sin_addr.s_addr = a->addr32[0];
2199 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2200#endif
1903 {
1904 struct sockaddr_in sin;
1905
1906 bzero(&sin, sizeof(sin));
1907 sin.sin_len = sizeof(sin);
1908 sin.sin_family = AF_INET;
1909 sin.sin_addr.s_addr = a->addr32[0];
1910 ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
2201 if (ke && KENTRY_RNF_ROOT(ke))
2202 ke = NULL;
2203 break;
1911 if (ke && KENTRY_RNF_ROOT(ke))
1912 ke = NULL;
1913 break;
1914 }
2204#endif /* INET */
2205#ifdef INET6
2206 case AF_INET6:
1915#endif /* INET */
1916#ifdef INET6
1917 case AF_INET6:
2207#ifdef __FreeBSD__
2208 bcopy(a, &V_pfr_sin6.sin6_addr, sizeof(V_pfr_sin6.sin6_addr));
2209 ke = (struct pfr_kentry *)rn_match(&V_pfr_sin6, kt->pfrkt_ip6);
2210#else
2211 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2212 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2213#endif
1918 {
1919 struct sockaddr_in6 sin6;
1920
1921 bzero(&sin6, sizeof(sin6));
1922 sin6.sin6_len = sizeof(sin6);
1923 sin6.sin6_family = AF_INET6;
1924 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1925 ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
2214 if (ke && KENTRY_RNF_ROOT(ke))
2215 ke = NULL;
2216 break;
1926 if (ke && KENTRY_RNF_ROOT(ke))
1927 ke = NULL;
1928 break;
1929 }
2217#endif /* INET6 */
2218 }
2219 match = (ke && !ke->pfrke_not);
2220 if (match)
2221 kt->pfrkt_match++;
2222 else
2223 kt->pfrkt_nomatch++;
2224 return (match);

--- 8 unchanged lines hidden ---

2233 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2234 kt = kt->pfrkt_root;
2235 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2236 return;
2237
2238 switch (af) {
2239#ifdef INET
2240 case AF_INET:
1930#endif /* INET6 */
1931 }
1932 match = (ke && !ke->pfrke_not);
1933 if (match)
1934 kt->pfrkt_match++;
1935 else
1936 kt->pfrkt_nomatch++;
1937 return (match);

--- 8 unchanged lines hidden ---

1946 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1947 kt = kt->pfrkt_root;
1948 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1949 return;
1950
1951 switch (af) {
1952#ifdef INET
1953 case AF_INET:
2241#ifdef __FreeBSD__
2242 V_pfr_sin.sin_addr.s_addr = a->addr32[0];
2243 ke = (struct pfr_kentry *)rn_match(&V_pfr_sin, kt->pfrkt_ip4);
2244#else
2245 pfr_sin.sin_addr.s_addr = a->addr32[0];
2246 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2247#endif
1954 {
1955 struct sockaddr_in sin;
1956
1957 sin.sin_len = sizeof(sin);
1958 sin.sin_family = AF_INET;
1959 sin.sin_addr.s_addr = a->addr32[0];
1960 ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
2248 if (ke && KENTRY_RNF_ROOT(ke))
2249 ke = NULL;
2250 break;
1961 if (ke && KENTRY_RNF_ROOT(ke))
1962 ke = NULL;
1963 break;
1964 }
2251#endif /* INET */
2252#ifdef INET6
2253 case AF_INET6:
1965#endif /* INET */
1966#ifdef INET6
1967 case AF_INET6:
2254#ifdef __FreeBSD__
2255 bcopy(a, &V_pfr_sin6.sin6_addr, sizeof(V_pfr_sin6.sin6_addr));
2256 ke = (struct pfr_kentry *)rn_match(&V_pfr_sin6, kt->pfrkt_ip6);
2257#else
2258 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2259 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2260#endif
1968 {
1969 struct sockaddr_in6 sin6;
1970
1971 sin6.sin6_len = sizeof(sin6);
1972 sin6.sin6_family = AF_INET6;
1973 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1974 ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
2261 if (ke && KENTRY_RNF_ROOT(ke))
2262 ke = NULL;
2263 break;
1975 if (ke && KENTRY_RNF_ROOT(ke))
1976 ke = NULL;
1977 break;
1978 }
2264#endif /* INET6 */
2265 default:
2266 ;
2267 }
2268 if ((ke == NULL || ke->pfrke_not) != notrule) {
2269 if (op_pass != PFR_OP_PASS)
2270 printf("pfr_update_stats: assertion failed.\n");
2271 op_pass = PFR_OP_XPASS;
2272 }
2273 kt->pfrkt_packets[dir_out][op_pass]++;
2274 kt->pfrkt_bytes[dir_out][op_pass] += len;
2275 if (ke != NULL && op_pass != PFR_OP_XPASS &&
2276 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2277 if (ke->pfrke_counters == NULL)
1979#endif /* INET6 */
1980 default:
1981 ;
1982 }
1983 if ((ke == NULL || ke->pfrke_not) != notrule) {
1984 if (op_pass != PFR_OP_PASS)
1985 printf("pfr_update_stats: assertion failed.\n");
1986 op_pass = PFR_OP_XPASS;
1987 }
1988 kt->pfrkt_packets[dir_out][op_pass]++;
1989 kt->pfrkt_bytes[dir_out][op_pass] += len;
1990 if (ke != NULL && op_pass != PFR_OP_XPASS &&
1991 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1992 if (ke->pfrke_counters == NULL)
2278#ifdef __FreeBSD__
2279 ke->pfrke_counters = pool_get(&V_pfr_kcounters_pl,
2280#else
2281 ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2282#endif
2283 PR_NOWAIT | PR_ZERO);
1993 ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
1994 M_NOWAIT | M_ZERO);
2284 if (ke->pfrke_counters != NULL) {
2285 ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2286 ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2287 }
2288 }
2289}
2290
2291struct pfr_ktable *
1995 if (ke->pfrke_counters != NULL) {
1996 ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
1997 ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
1998 }
1999 }
2000}
2001
2002struct pfr_ktable *
2292pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
2003pfr_attach_table(struct pf_ruleset *rs, char *name)
2293{
2294 struct pfr_ktable *kt, *rt;
2295 struct pfr_table tbl;
2296 struct pf_anchor *ac = rs->anchor;
2297
2004{
2005 struct pfr_ktable *kt, *rt;
2006 struct pfr_table tbl;
2007 struct pf_anchor *ac = rs->anchor;
2008
2009 PF_RULES_WASSERT();
2010
2298 bzero(&tbl, sizeof(tbl));
2299 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2300 if (ac != NULL)
2301 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2302 kt = pfr_lookup_table(&tbl);
2303 if (kt == NULL) {
2011 bzero(&tbl, sizeof(tbl));
2012 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2013 if (ac != NULL)
2014 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2015 kt = pfr_lookup_table(&tbl);
2016 if (kt == NULL) {
2304 kt = pfr_create_ktable(&tbl, time_second, 1, intr);
2017 kt = pfr_create_ktable(&tbl, time_second, 1);
2305 if (kt == NULL)
2306 return (NULL);
2307 if (ac != NULL) {
2308 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2309 rt = pfr_lookup_table(&tbl);
2310 if (rt == NULL) {
2018 if (kt == NULL)
2019 return (NULL);
2020 if (ac != NULL) {
2021 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2022 rt = pfr_lookup_table(&tbl);
2023 if (rt == NULL) {
2311 rt = pfr_create_ktable(&tbl, 0, 1, intr);
2024 rt = pfr_create_ktable(&tbl, 0, 1);
2312 if (rt == NULL) {
2313 pfr_destroy_ktable(kt, 0);
2314 return (NULL);
2315 }
2316 pfr_insert_ktable(rt);
2317 }
2318 kt->pfrkt_root = rt;
2319 }
2320 pfr_insert_ktable(kt);
2321 }
2322 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2323 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2324 return (kt);
2325}
2326
2327void
2328pfr_detach_table(struct pfr_ktable *kt)
2329{
2025 if (rt == NULL) {
2026 pfr_destroy_ktable(kt, 0);
2027 return (NULL);
2028 }
2029 pfr_insert_ktable(rt);
2030 }
2031 kt->pfrkt_root = rt;
2032 }
2033 pfr_insert_ktable(kt);
2034 }
2035 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2036 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2037 return (kt);
2038}
2039
2040void
2041pfr_detach_table(struct pfr_ktable *kt)
2042{
2330 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2331 printf("pfr_detach_table: refcount = %d.\n",
2332 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2333 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2043
2044 PF_RULES_WASSERT();
2045 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2046 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2047
2048 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2334 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2335}
2336
2337int
2338pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2049 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2050}
2051
2052int
2053pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2339 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2054 sa_family_t af)
2340{
2055{
2341#ifdef __FreeBSD__
2056 struct pf_addr *addr, *cur, *mask;
2057 union sockaddr_union uaddr, umask;
2342 struct pfr_kentry *ke, *ke2 = NULL;
2058 struct pfr_kentry *ke, *ke2 = NULL;
2343 struct pf_addr *addr = NULL;
2344#else
2345 struct pfr_kentry *ke, *ke2;
2346 struct pf_addr *addr;
2347#endif
2348 union sockaddr_union mask;
2349 int idx = -1, use_counter = 0;
2350
2059 int idx = -1, use_counter = 0;
2060
2351#ifdef __FreeBSD__
2352 if (af == AF_INET)
2353 addr = (struct pf_addr *)&V_pfr_sin.sin_addr;
2354 else if (af == AF_INET6)
2355 addr = (struct pf_addr *)&V_pfr_sin6.sin6_addr;
2356#else
2357 if (af == AF_INET)
2358 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2359 else if (af == AF_INET6)
2360 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2361#endif
2061 switch (af) {
2062 case AF_INET:
2063 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2064 uaddr.sin.sin_family = AF_INET;
2065 break;
2066 case AF_INET6:
2067 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2068 uaddr.sin6.sin6_family = AF_INET6;
2069 break;
2070 }
2071 addr = SUNION2PF(&uaddr, af);
2072
2362 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2363 kt = kt->pfrkt_root;
2364 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2365 return (-1);
2366
2367 if (pidx != NULL)
2368 idx = *pidx;
2369 if (counter != NULL && idx >= 0)
2370 use_counter = 1;
2371 if (idx < 0)
2372 idx = 0;
2373
2374_next_block:
2375 ke = pfr_kentry_byidx(kt, idx, af);
2376 if (ke == NULL) {
2377 kt->pfrkt_nomatch++;
2378 return (1);
2379 }
2073 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2074 kt = kt->pfrkt_root;
2075 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2076 return (-1);
2077
2078 if (pidx != NULL)
2079 idx = *pidx;
2080 if (counter != NULL && idx >= 0)
2081 use_counter = 1;
2082 if (idx < 0)
2083 idx = 0;
2084
2085_next_block:
2086 ke = pfr_kentry_byidx(kt, idx, af);
2087 if (ke == NULL) {
2088 kt->pfrkt_nomatch++;
2089 return (1);
2090 }
2380#ifdef __FreeBSD__
2381 pfr_prepare_network(&V_pfr_mask, af, ke->pfrke_net);
2382#else
2383 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2384#endif
2385 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2386#ifdef __FreeBSD__
2387 *rmask = SUNION2PF(&V_pfr_mask, af);
2388#else
2389 *rmask = SUNION2PF(&pfr_mask, af);
2390#endif
2091 pfr_prepare_network(&umask, af, ke->pfrke_net);
2092 cur = SUNION2PF(&ke->pfrke_sa, af);
2093 mask = SUNION2PF(&umask, af);
2391
2392 if (use_counter) {
2393 /* is supplied address within block? */
2094
2095 if (use_counter) {
2096 /* is supplied address within block? */
2394 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2097 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2395 /* no, go to next block in table */
2396 idx++;
2397 use_counter = 0;
2398 goto _next_block;
2399 }
2400 PF_ACPY(addr, counter, af);
2401 } else {
2402 /* use first address of block */
2098 /* no, go to next block in table */
2099 idx++;
2100 use_counter = 0;
2101 goto _next_block;
2102 }
2103 PF_ACPY(addr, counter, af);
2104 } else {
2105 /* use first address of block */
2403 PF_ACPY(addr, *raddr, af);
2106 PF_ACPY(addr, cur, af);
2404 }
2405
2406 if (!KENTRY_NETWORK(ke)) {
2407 /* this is a single IP address - no possible nested block */
2408 PF_ACPY(counter, addr, af);
2409 *pidx = idx;
2410 kt->pfrkt_match++;
2411 return (0);
2412 }
2413 for (;;) {
2414 /* we don't want to use a nested block */
2107 }
2108
2109 if (!KENTRY_NETWORK(ke)) {
2110 /* this is a single IP address - no possible nested block */
2111 PF_ACPY(counter, addr, af);
2112 *pidx = idx;
2113 kt->pfrkt_match++;
2114 return (0);
2115 }
2116 for (;;) {
2117 /* we don't want to use a nested block */
2415#ifdef __FreeBSD__
2416 if (af == AF_INET)
2417 ke2 = (struct pfr_kentry *)rn_match(&V_pfr_sin,
2118 switch (af) {
2119 case AF_INET:
2120 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2418 kt->pfrkt_ip4);
2121 kt->pfrkt_ip4);
2419 else if (af == AF_INET6)
2420 ke2 = (struct pfr_kentry *)rn_match(&V_pfr_sin6,
2122 break;
2123 case AF_INET6:
2124 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2421 kt->pfrkt_ip6);
2125 kt->pfrkt_ip6);
2422#else
2423 if (af == AF_INET)
2424 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2425 kt->pfrkt_ip4);
2426 else if (af == AF_INET6)
2427 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2428 kt->pfrkt_ip6);
2429#endif
2126 break;
2127 }
2430 /* no need to check KENTRY_RNF_ROOT() here */
2431 if (ke2 == ke) {
2432 /* lookup return the same block - perfect */
2433 PF_ACPY(counter, addr, af);
2434 *pidx = idx;
2435 kt->pfrkt_match++;
2436 return (0);
2437 }
2438
2439 /* we need to increase the counter past the nested block */
2128 /* no need to check KENTRY_RNF_ROOT() here */
2129 if (ke2 == ke) {
2130 /* lookup return the same block - perfect */
2131 PF_ACPY(counter, addr, af);
2132 *pidx = idx;
2133 kt->pfrkt_match++;
2134 return (0);
2135 }
2136
2137 /* we need to increase the counter past the nested block */
2440 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2441#ifdef __FreeBSD__
2442 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &V_pfr_ffaddr, af);
2443#else
2444 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2445#endif
2138 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2139 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2446 PF_AINC(addr, af);
2140 PF_AINC(addr, af);
2447 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2141 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2448 /* ok, we reached the end of our main block */
2449 /* go to next block in table */
2450 idx++;
2451 use_counter = 0;
2452 goto _next_block;
2453 }
2454 }
2455}
2456
2142 /* ok, we reached the end of our main block */
2143 /* go to next block in table */
2144 idx++;
2145 use_counter = 0;
2146 goto _next_block;
2147 }
2148 }
2149}
2150
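The subtle part of pfr_pool_get() above is stepping over a nested block: when the radix lookup (ke2) lands on a more specific entry inside the current block, the code builds that entry's network mask, uses PF_POOLMASK() with pfr_ffaddr (an all-ones address) to keep the network bits and fill every host bit with one, and PF_AINC() then moves to the next address, i.e. the first one past the nested network; PF_MATCHA() decides whether that address is still inside the outer block. Below is a minimal user-space sketch of the same arithmetic for the IPv4 case only, with plain uint32_t operations standing in for the pf macros (all names here are invented for the illustration).

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/*
 * Return the first address after the nested network of the given prefix
 * length that contains 'addr' (both in host byte order): mask down to the
 * nested net, set the host bits, add one.
 */
static uint32_t
demo_skip_nested_block(uint32_t addr, int nested_prefix)
{
	uint32_t mask = nested_prefix ? ~0U << (32 - nested_prefix) : 0;

	addr = (addr & mask) | ~mask;	/* last address of the nested block */
	return (addr + 1);		/* first address past it */
}

int
main(void)
{
	struct in_addr in;
	char buf[INET_ADDRSTRLEN];
	uint32_t next;

	inet_pton(AF_INET, "192.0.2.37", &in);	/* inside nested 192.0.2.32/28 */
	next = demo_skip_nested_block(ntohl(in.s_addr), 28);
	in.s_addr = htonl(next);
	printf("%s\n", inet_ntop(AF_INET, &in, buf, sizeof(buf)));	/* 192.0.2.48 */
	return (0);
}

If the incremented address has left the outer block, the goto _next_block path above advances idx to the next table entry instead.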
2457struct pfr_kentry *
2151static struct pfr_kentry *
2458pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2459{
2460 struct pfr_walktree w;
2461
2462 bzero(&w, sizeof(w));
2463 w.pfrw_op = PFRW_POOL_GET;
2464 w.pfrw_cnt = idx;
2465
2466 switch (af) {
2467#ifdef INET
2468 case AF_INET:
2152pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2153{
2154 struct pfr_walktree w;
2155
2156 bzero(&w, sizeof(w));
2157 w.pfrw_op = PFRW_POOL_GET;
2158 w.pfrw_cnt = idx;
2159
2160 switch (af) {
2161#ifdef INET
2162 case AF_INET:
2469#ifdef __FreeBSD__
2470 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2163 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2471#else
2472 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2473#endif
2474 return (w.pfrw_kentry);
2475#endif /* INET */
2476#ifdef INET6
2477 case AF_INET6:
2164 return (w.pfrw_kentry);
2165#endif /* INET */
2166#ifdef INET6
2167 case AF_INET6:
2478#ifdef __FreeBSD__
2479 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2168 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2480#else
2481 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2482#endif
2483 return (w.pfrw_kentry);
2484#endif /* INET6 */
2485 default:
2486 return (NULL);
2487 }
2488}
2489
2490void
2491pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2492{
2493 struct pfr_walktree w;
2169 return (w.pfrw_kentry);
2170#endif /* INET6 */
2171 default:
2172 return (NULL);
2173 }
2174}
2175
2176void
2177pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2178{
2179 struct pfr_walktree w;
2494 int s;
2495
2496 bzero(&w, sizeof(w));
2497 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2498 w.pfrw_dyn = dyn;
2499
2180
2181 bzero(&w, sizeof(w));
2182 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2183 w.pfrw_dyn = dyn;
2184
2500 s = splsoftnet();
2501 dyn->pfid_acnt4 = 0;
2502 dyn->pfid_acnt6 = 0;
2503 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2185 dyn->pfid_acnt4 = 0;
2186 dyn->pfid_acnt6 = 0;
2187 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2504#ifdef __FreeBSD__
2505 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2188 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2506#else
2507 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2508#endif
2509 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2189 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2510#ifdef __FreeBSD__
2511 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2190 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2512#else
2513 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2514#endif
2515 splx(s);
2516}
2191}