1/* $FreeBSD: head/sys/contrib/pf/net/pf_table.c 127145 2004-03-17 21:11:02Z mlaier $ */ 2/* $OpenBSD: pf_table.c,v 1.41 2003/08/22 15:19:23 henning Exp $ */
| 1/* $FreeBSD: head/sys/contrib/pf/net/pf_table.c 130613 2004-06-16 23:24:02Z mlaier $ */ 2/* $OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $ */
|
3 4/* 5 * Copyright (c) 2002 Cedric Berger 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 */ 33 34#ifdef __FreeBSD__ 35#include "opt_inet.h" 36#include "opt_inet6.h" 37#endif 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/socket.h> 42#include <sys/mbuf.h> 43#include <sys/kernel.h> 44#ifdef __FreeBSD__ 45#include <sys/malloc.h> 46#endif 47 48#include <net/if.h> 49#include <net/route.h> 50#include <netinet/in.h> 51#ifndef __FreeBSD__ 52#include <netinet/ip_ipsp.h> 53#endif 54 55#include <net/pfvar.h> 56 57#define ACCEPT_FLAGS(oklist) \ 58 do { \ 59 if ((flags & ~(oklist)) & \ 60 PFR_FLAG_ALLMASK) \ 61 return (EINVAL); \ 62 } while (0) 63
| 3 4/* 5 * Copyright (c) 2002 Cedric Berger 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 
31 * 32 */ 33 34#ifdef __FreeBSD__ 35#include "opt_inet.h" 36#include "opt_inet6.h" 37#endif 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/socket.h> 42#include <sys/mbuf.h> 43#include <sys/kernel.h> 44#ifdef __FreeBSD__ 45#include <sys/malloc.h> 46#endif 47 48#include <net/if.h> 49#include <net/route.h> 50#include <netinet/in.h> 51#ifndef __FreeBSD__ 52#include <netinet/ip_ipsp.h> 53#endif 54 55#include <net/pfvar.h> 56 57#define ACCEPT_FLAGS(oklist) \ 58 do { \ 59 if ((flags & ~(oklist)) & \ 60 PFR_FLAG_ALLMASK) \ 61 return (EINVAL); \ 62 } while (0) 63
|
#ifdef __FreeBSD__
/*
 * copyin()/copyout() may sleep while faulting on user memory, so the
 * pf lock is dropped around them.  Parameter names follow copyin(9)/
 * copyout(9): "uaddr" is the user-space address, "kaddr" the kernel one.
 */
static inline int
_copyin(const void *uaddr, void *kaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	r = copyin(uaddr, kaddr, len);
	PF_LOCK();

	return (r);
}

static inline int
_copyout(const void *kaddr, void *uaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	/* NB: copyout(9) takes (kernel src, user dst, len). */
	r = copyout(kaddr, uaddr, len);
	PF_LOCK();

	return (r);
}

/*
 * COPYIN/COPYOUT move pfr_addr arrays between the caller and the kernel.
 * PFR_FLAG_USERIOCTL marks requests arriving from userland via ioctl();
 * kernel-internal callers hand in kernel buffers and get a plain bcopy()
 * (which cannot fail, hence the ", 0").
 */
#define COPYIN(from, to, size) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	_copyin((from), (to), (size)) : \
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	_copyout((from), (to), (size)) : \
	(bcopy((from), (to), (size)), 0))

#else

#define COPYIN(from, to, size) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyin((from), (to), (size)) : \
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyout((from), (to), (size)) : \
	(bcopy((from), (to), (size)), 0))

#endif
|
/* Initialize a struct sockaddr_in for the given IPv4 address. */
#define FILLIN_SIN(sin, addr) \
	do { \
		(sin).sin_len = sizeof(sin); \
		(sin).sin_family = AF_INET; \
		(sin).sin_addr = (addr); \
	} while (0)

/* Initialize a struct sockaddr_in6 for the given IPv6 address. */
#define FILLIN_SIN6(sin6, addr) \
	do { \
		(sin6).sin6_len = sizeof(sin6); \
		(sin6).sin6_family = AF_INET6; \
		(sin6).sin6_addr = (addr); \
	} while (0)

/* Exchange two values of the given type. */
#define SWAP(type, a1, a2) \
	do { \
		type tmp = a1; \
		a1 = a2; \
		a2 = tmp; \
	} while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union, by address family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ? \
    (struct pf_addr *)&(su)->sin.sin_addr : \
    (struct pf_addr *)&(su)->sin6.sin6_addr)
88 89#define AF_BITS(af) (((af)==AF_INET)?32:128) 90#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 91#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 92#define KENTRY_RNF_ROOT(ke) \ 93 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 94 95#define NO_ADDRESSES (-1) 96#define ENQUEUE_UNMARKED_ONLY (1) 97#define INVERT_NEG_FLAG (1) 98 99struct pfr_walktree { 100 enum pfrw_op { 101 PFRW_MARK, 102 PFRW_SWEEP, 103 PFRW_ENQUEUE, 104 PFRW_GET_ADDRS, 105 PFRW_GET_ASTATS,
| 137 138#define AF_BITS(af) (((af)==AF_INET)?32:128) 139#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 140#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 141#define KENTRY_RNF_ROOT(ke) \ 142 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 143 144#define NO_ADDRESSES (-1) 145#define ENQUEUE_UNMARKED_ONLY (1) 146#define INVERT_NEG_FLAG (1) 147 148struct pfr_walktree { 149 enum pfrw_op { 150 PFRW_MARK, 151 PFRW_SWEEP, 152 PFRW_ENQUEUE, 153 PFRW_GET_ADDRS, 154 PFRW_GET_ASTATS,
|
106 PFRW_POOL_GET
| 155 PFRW_POOL_GET, 156 PFRW_DYNADDR_UPDATE
|
107 } pfrw_op; 108 union { 109 struct pfr_addr *pfrw1_addr; 110 struct pfr_astats *pfrw1_astats; 111 struct pfr_kentryworkq *pfrw1_workq; 112 struct pfr_kentry *pfrw1_kentry;
| 157 } pfrw_op; 158 union { 159 struct pfr_addr *pfrw1_addr; 160 struct pfr_astats *pfrw1_astats; 161 struct pfr_kentryworkq *pfrw1_workq; 162 struct pfr_kentry *pfrw1_kentry;
|
| 163 struct pfi_dynaddr *pfrw1_dyn;
|
113 } pfrw_1; 114 int pfrw_free;
| 164 } pfrw_1; 165 int pfrw_free;
|
| 166 int pfrw_flags;
|
115}; 116#define pfrw_addr pfrw_1.pfrw1_addr 117#define pfrw_astats pfrw_1.pfrw1_astats 118#define pfrw_workq pfrw_1.pfrw1_workq 119#define pfrw_kentry pfrw_1.pfrw1_kentry
| 167}; 168#define pfrw_addr pfrw_1.pfrw1_addr 169#define pfrw_astats pfrw_1.pfrw1_astats 170#define pfrw_workq pfrw_1.pfrw1_workq 171#define pfrw_kentry pfrw_1.pfrw1_kentry
|
| 172#define pfrw_dyn pfrw_1.pfrw1_dyn
|
120#define pfrw_cnt pfrw_free 121 122#define senderr(e) do { rv = (e); goto _bad; } while (0) 123 124#ifdef __FreeBSD__ 125uma_zone_t pfr_ktable_pl; 126uma_zone_t pfr_kentry_pl; 127#else 128struct pool pfr_ktable_pl; 129struct pool pfr_kentry_pl; 130#endif 131struct sockaddr_in pfr_sin; 132struct sockaddr_in6 pfr_sin6;
| 173#define pfrw_cnt pfrw_free 174 175#define senderr(e) do { rv = (e); goto _bad; } while (0) 176 177#ifdef __FreeBSD__ 178uma_zone_t pfr_ktable_pl; 179uma_zone_t pfr_kentry_pl; 180#else 181struct pool pfr_ktable_pl; 182struct pool pfr_kentry_pl; 183#endif 184struct sockaddr_in pfr_sin; 185struct sockaddr_in6 pfr_sin6;
|
133union sockaddr_union pfr_mask;
| 186union sockaddr_union pfr_mask;
|
134struct pf_addr pfr_ffaddr; 135 136void pfr_copyout_addr(struct pfr_addr *, 137 struct pfr_kentry *ke); 138int pfr_validate_addr(struct pfr_addr *); 139void pfr_enqueue_addrs(struct pfr_ktable *, 140 struct pfr_kentryworkq *, int *, int); 141void pfr_mark_addrs(struct pfr_ktable *); 142struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *, 143 struct pfr_addr *, int); 144struct pfr_kentry *pfr_create_kentry(struct pfr_addr *); 145void pfr_destroy_kentries(struct pfr_kentryworkq *); 146void pfr_destroy_kentry(struct pfr_kentry *); 147void pfr_insert_kentries(struct pfr_ktable *, 148 struct pfr_kentryworkq *, long); 149void pfr_remove_kentries(struct pfr_ktable *, 150 struct pfr_kentryworkq *); 151void pfr_clstats_kentries(struct pfr_kentryworkq *, long, 152 int);
| 187struct pf_addr pfr_ffaddr; 188 189void pfr_copyout_addr(struct pfr_addr *, 190 struct pfr_kentry *ke); 191int pfr_validate_addr(struct pfr_addr *); 192void pfr_enqueue_addrs(struct pfr_ktable *, 193 struct pfr_kentryworkq *, int *, int); 194void pfr_mark_addrs(struct pfr_ktable *); 195struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *, 196 struct pfr_addr *, int); 197struct pfr_kentry *pfr_create_kentry(struct pfr_addr *); 198void pfr_destroy_kentries(struct pfr_kentryworkq *); 199void pfr_destroy_kentry(struct pfr_kentry *); 200void pfr_insert_kentries(struct pfr_ktable *, 201 struct pfr_kentryworkq *, long); 202void pfr_remove_kentries(struct pfr_ktable *, 203 struct pfr_kentryworkq *); 204void pfr_clstats_kentries(struct pfr_kentryworkq *, long, 205 int);
|
153void pfr_reset_feedback(struct pfr_addr *, int);
| 206void pfr_reset_feedback(struct pfr_addr *, int, int);
|
154void pfr_prepare_network(union sockaddr_union *, int, int); 155int pfr_route_kentry(struct pfr_ktable *, 156 struct pfr_kentry *); 157int pfr_unroute_kentry(struct pfr_ktable *, 158 struct pfr_kentry *); 159int pfr_walktree(struct radix_node *, void *);
| 207void pfr_prepare_network(union sockaddr_union *, int, int); 208int pfr_route_kentry(struct pfr_ktable *, 209 struct pfr_kentry *); 210int pfr_unroute_kentry(struct pfr_ktable *, 211 struct pfr_kentry *); 212int pfr_walktree(struct radix_node *, void *);
|
160int pfr_validate_table(struct pfr_table *, int);
| 213int pfr_validate_table(struct pfr_table *, int, int);
|
161void pfr_commit_ktable(struct pfr_ktable *, long); 162void pfr_insert_ktables(struct pfr_ktableworkq *); 163void pfr_insert_ktable(struct pfr_ktable *); 164void pfr_setflags_ktables(struct pfr_ktableworkq *); 165void pfr_setflags_ktable(struct pfr_ktable *, int); 166void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 167 int); 168void pfr_clstats_ktable(struct pfr_ktable *, long, int); 169struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int); 170void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 171void pfr_destroy_ktable(struct pfr_ktable *, int); 172int pfr_ktable_compare(struct pfr_ktable *, 173 struct pfr_ktable *); 174struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
| 214void pfr_commit_ktable(struct pfr_ktable *, long); 215void pfr_insert_ktables(struct pfr_ktableworkq *); 216void pfr_insert_ktable(struct pfr_ktable *); 217void pfr_setflags_ktables(struct pfr_ktableworkq *); 218void pfr_setflags_ktable(struct pfr_ktable *, int); 219void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 220 int); 221void pfr_clstats_ktable(struct pfr_ktable *, long, int); 222struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int); 223void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 224void pfr_destroy_ktable(struct pfr_ktable *, int); 225int pfr_ktable_compare(struct pfr_ktable *, 226 struct pfr_ktable *); 227struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
|
175void pfr_clean_node_mask(struct pfr_ktable *,
| 228void pfr_clean_node_mask(struct pfr_ktable *,
|
176 struct pfr_kentryworkq *); 177int pfr_table_count(struct pfr_table *, int); 178int pfr_skip_table(struct pfr_table *, 179 struct pfr_ktable *, int);
| 229 struct pfr_kentryworkq *); 230int pfr_table_count(struct pfr_table *, int); 231int pfr_skip_table(struct pfr_table *, 232 struct pfr_ktable *, int);
|
180struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
| 233struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
|
181 182RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 183RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 184 185struct pfr_ktablehead pfr_ktables; 186struct pfr_table pfr_nulltable; 187int pfr_ktable_cnt; 188 189void 190pfr_initialize(void) 191{ 192#ifndef __FreeBSD__ 193 pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0, 194 "pfrktable", NULL); 195 pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0, 196 "pfrkentry", NULL); 197#endif 198 199 pfr_sin.sin_len = sizeof(pfr_sin); 200 pfr_sin.sin_family = AF_INET; 201 pfr_sin6.sin6_len = sizeof(pfr_sin6); 202 pfr_sin6.sin6_family = AF_INET6; 203 204 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr)); 205} 206 207int 208pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 209{ 210 struct pfr_ktable *kt; 211 struct pfr_kentryworkq workq; 212 int s; 213 214 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
| 234 235RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 236RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 237 238struct pfr_ktablehead pfr_ktables; 239struct pfr_table pfr_nulltable; 240int pfr_ktable_cnt; 241 242void 243pfr_initialize(void) 244{ 245#ifndef __FreeBSD__ 246 pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0, 247 "pfrktable", NULL); 248 pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0, 249 "pfrkentry", NULL); 250#endif 251 252 pfr_sin.sin_len = sizeof(pfr_sin); 253 pfr_sin.sin_family = AF_INET; 254 pfr_sin6.sin6_len = sizeof(pfr_sin6); 255 pfr_sin6.sin6_family = AF_INET6; 256 257 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr)); 258} 259 260int 261pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 262{ 263 struct pfr_ktable *kt; 264 struct pfr_kentryworkq workq; 265 int s; 266 267 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
|
215 if (pfr_validate_table(tbl, 0))
| 268 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
|
216 return (EINVAL); 217 kt = pfr_lookup_table(tbl); 218 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 219 return (ESRCH); 220 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 221 return (EPERM); 222 pfr_enqueue_addrs(kt, &workq, ndel, 0); 223 224 if (!(flags & PFR_FLAG_DUMMY)) { 225 if (flags & PFR_FLAG_ATOMIC) 226 s = splsoftnet(); 227 pfr_remove_kentries(kt, &workq); 228 if (flags & PFR_FLAG_ATOMIC) 229 splx(s); 230 if (kt->pfrkt_cnt) { 231 printf("pfr_clr_addrs: corruption detected (%d).\n", 232 kt->pfrkt_cnt); 233 kt->pfrkt_cnt = 0; 234 } 235 } 236 return (0); 237} 238 239int 240pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 241 int *nadd, int flags) 242{ 243 struct pfr_ktable *kt, *tmpkt; 244 struct pfr_kentryworkq workq; 245 struct pfr_kentry *p, *q; 246 struct pfr_addr ad; 247 int i, rv, s, xadd = 0; 248#ifdef __FreeBSD__
| 269 return (EINVAL); 270 kt = pfr_lookup_table(tbl); 271 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 272 return (ESRCH); 273 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 274 return (EPERM); 275 pfr_enqueue_addrs(kt, &workq, ndel, 0); 276 277 if (!(flags & PFR_FLAG_DUMMY)) { 278 if (flags & PFR_FLAG_ATOMIC) 279 s = splsoftnet(); 280 pfr_remove_kentries(kt, &workq); 281 if (flags & PFR_FLAG_ATOMIC) 282 splx(s); 283 if (kt->pfrkt_cnt) { 284 printf("pfr_clr_addrs: corruption detected (%d).\n", 285 kt->pfrkt_cnt); 286 kt->pfrkt_cnt = 0; 287 } 288 } 289 return (0); 290} 291 292int 293pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 294 int *nadd, int flags) 295{ 296 struct pfr_ktable *kt, *tmpkt; 297 struct pfr_kentryworkq workq; 298 struct pfr_kentry *p, *q; 299 struct pfr_addr ad; 300 int i, rv, s, xadd = 0; 301#ifdef __FreeBSD__
|
249 int ec;
| |
250 /* 251 * XXX Is it OK under LP64 environments? 252 */ 253 long tzero = (long)time_second; 254#else 255 long tzero = time.tv_sec; 256#endif 257 258 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
| 302 /* 303 * XXX Is it OK under LP64 environments? 304 */ 305 long tzero = (long)time_second; 306#else 307 long tzero = time.tv_sec; 308#endif 309 310 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
|
259 if (pfr_validate_table(tbl, 0))
| 311 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
|
260 return (EINVAL); 261 kt = pfr_lookup_table(tbl); 262 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 263 return (ESRCH); 264 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 265 return (EPERM); 266 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); 267 if (tmpkt == NULL) 268 return (ENOMEM); 269 SLIST_INIT(&workq); 270 for (i = 0; i < size; i++) {
| 312 return (EINVAL); 313 kt = pfr_lookup_table(tbl); 314 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 315 return (ESRCH); 316 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 317 return (EPERM); 318 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); 319 if (tmpkt == NULL) 320 return (ENOMEM); 321 SLIST_INIT(&workq); 322 for (i = 0; i < size; i++) {
|
271#ifdef __FreeBSD__ 272 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 273 if (ec)
| 323 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
274 senderr(EFAULT);
| 324 senderr(EFAULT);
|
275#else 276 if (copyin(addr+i, &ad, sizeof(ad))) 277 senderr(EFAULT); 278#endif
| |
279 if (pfr_validate_addr(&ad)) 280 senderr(EINVAL); 281 p = pfr_lookup_addr(kt, &ad, 1); 282 q = pfr_lookup_addr(tmpkt, &ad, 1); 283 if (flags & PFR_FLAG_FEEDBACK) { 284 if (q != NULL) 285 ad.pfra_fback = PFR_FB_DUPLICATE; 286 else if (p == NULL) 287 ad.pfra_fback = PFR_FB_ADDED; 288 else if (p->pfrke_not != ad.pfra_not) 289 ad.pfra_fback = PFR_FB_CONFLICT; 290 else 291 ad.pfra_fback = PFR_FB_NONE; 292 } 293 if (p == NULL && q == NULL) { 294 p = pfr_create_kentry(&ad); 295 if (p == NULL) 296 senderr(ENOMEM); 297 if (pfr_route_kentry(tmpkt, p)) { 298 pfr_destroy_kentry(p); 299 ad.pfra_fback = PFR_FB_NONE; 300 } else { 301 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 302 xadd++; 303 } 304 }
| 325 if (pfr_validate_addr(&ad)) 326 senderr(EINVAL); 327 p = pfr_lookup_addr(kt, &ad, 1); 328 q = pfr_lookup_addr(tmpkt, &ad, 1); 329 if (flags & PFR_FLAG_FEEDBACK) { 330 if (q != NULL) 331 ad.pfra_fback = PFR_FB_DUPLICATE; 332 else if (p == NULL) 333 ad.pfra_fback = PFR_FB_ADDED; 334 else if (p->pfrke_not != ad.pfra_not) 335 ad.pfra_fback = PFR_FB_CONFLICT; 336 else 337 ad.pfra_fback = PFR_FB_NONE; 338 } 339 if (p == NULL && q == NULL) { 340 p = pfr_create_kentry(&ad); 341 if (p == NULL) 342 senderr(ENOMEM); 343 if (pfr_route_kentry(tmpkt, p)) { 344 pfr_destroy_kentry(p); 345 ad.pfra_fback = PFR_FB_NONE; 346 } else { 347 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 348 xadd++; 349 } 350 }
|
305#ifdef __FreeBSD__
| |
306 if (flags & PFR_FLAG_FEEDBACK) {
| 351 if (flags & PFR_FLAG_FEEDBACK) {
|
307 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 308 if (ec)
| 352 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
309 senderr(EFAULT); 310 }
| 353 senderr(EFAULT); 354 }
|
311#else 312 if (flags & PFR_FLAG_FEEDBACK) 313 if (copyout(&ad, addr+i, sizeof(ad))) 314 senderr(EFAULT); 315#endif
| |
316 } 317 pfr_clean_node_mask(tmpkt, &workq); 318 if (!(flags & PFR_FLAG_DUMMY)) { 319 if (flags & PFR_FLAG_ATOMIC) 320 s = splsoftnet(); 321 pfr_insert_kentries(kt, &workq, tzero); 322 if (flags & PFR_FLAG_ATOMIC) 323 splx(s); 324 } else 325 pfr_destroy_kentries(&workq); 326 if (nadd != NULL) 327 *nadd = xadd; 328 pfr_destroy_ktable(tmpkt, 0); 329 return (0); 330_bad: 331 pfr_clean_node_mask(tmpkt, &workq); 332 pfr_destroy_kentries(&workq); 333 if (flags & PFR_FLAG_FEEDBACK)
| 355 } 356 pfr_clean_node_mask(tmpkt, &workq); 357 if (!(flags & PFR_FLAG_DUMMY)) { 358 if (flags & PFR_FLAG_ATOMIC) 359 s = splsoftnet(); 360 pfr_insert_kentries(kt, &workq, tzero); 361 if (flags & PFR_FLAG_ATOMIC) 362 splx(s); 363 } else 364 pfr_destroy_kentries(&workq); 365 if (nadd != NULL) 366 *nadd = xadd; 367 pfr_destroy_ktable(tmpkt, 0); 368 return (0); 369_bad: 370 pfr_clean_node_mask(tmpkt, &workq); 371 pfr_destroy_kentries(&workq); 372 if (flags & PFR_FLAG_FEEDBACK)
|
334 pfr_reset_feedback(addr, size);
| 373 pfr_reset_feedback(addr, size, flags);
|
335 pfr_destroy_ktable(tmpkt, 0); 336 return (rv); 337} 338 339int 340pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 341 int *ndel, int flags) 342{ 343 struct pfr_ktable *kt; 344 struct pfr_kentryworkq workq; 345 struct pfr_kentry *p; 346 struct pfr_addr ad; 347 int i, rv, s, xdel = 0;
| 374 pfr_destroy_ktable(tmpkt, 0); 375 return (rv); 376} 377 378int 379pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 380 int *ndel, int flags) 381{ 382 struct pfr_ktable *kt; 383 struct pfr_kentryworkq workq; 384 struct pfr_kentry *p; 385 struct pfr_addr ad; 386 int i, rv, s, xdel = 0;
|
348#ifdef __FreeBSD__ 349 int ec; 350#endif
| |
351 352 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
| 387 388 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
|
353 if (pfr_validate_table(tbl, 0))
| 389 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
|
354 return (EINVAL); 355 kt = pfr_lookup_table(tbl); 356 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 357 return (ESRCH); 358 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 359 return (EPERM); 360 pfr_mark_addrs(kt); 361 SLIST_INIT(&workq); 362 for (i = 0; i < size; i++) {
| 390 return (EINVAL); 391 kt = pfr_lookup_table(tbl); 392 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 393 return (ESRCH); 394 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 395 return (EPERM); 396 pfr_mark_addrs(kt); 397 SLIST_INIT(&workq); 398 for (i = 0; i < size; i++) {
|
363#ifdef __FreeBSD__ 364 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 365 if (ec)
| 399 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
366 senderr(EFAULT);
| 400 senderr(EFAULT);
|
367#else 368 if (copyin(addr+i, &ad, sizeof(ad))) 369 senderr(EFAULT); 370#endif
| |
371 if (pfr_validate_addr(&ad)) 372 senderr(EINVAL); 373 p = pfr_lookup_addr(kt, &ad, 1); 374 if (flags & PFR_FLAG_FEEDBACK) { 375 if (p == NULL) 376 ad.pfra_fback = PFR_FB_NONE; 377 else if (p->pfrke_not != ad.pfra_not) 378 ad.pfra_fback = PFR_FB_CONFLICT; 379 else if (p->pfrke_mark) 380 ad.pfra_fback = PFR_FB_DUPLICATE; 381 else 382 ad.pfra_fback = PFR_FB_DELETED; 383 } 384 if (p != NULL && p->pfrke_not == ad.pfra_not && 385 !p->pfrke_mark) { 386 p->pfrke_mark = 1; 387 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 388 xdel++; 389 }
| 401 if (pfr_validate_addr(&ad)) 402 senderr(EINVAL); 403 p = pfr_lookup_addr(kt, &ad, 1); 404 if (flags & PFR_FLAG_FEEDBACK) { 405 if (p == NULL) 406 ad.pfra_fback = PFR_FB_NONE; 407 else if (p->pfrke_not != ad.pfra_not) 408 ad.pfra_fback = PFR_FB_CONFLICT; 409 else if (p->pfrke_mark) 410 ad.pfra_fback = PFR_FB_DUPLICATE; 411 else 412 ad.pfra_fback = PFR_FB_DELETED; 413 } 414 if (p != NULL && p->pfrke_not == ad.pfra_not && 415 !p->pfrke_mark) { 416 p->pfrke_mark = 1; 417 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 418 xdel++; 419 }
|
390#ifdef __FreeBSD__ 391 if (flags & PFR_FLAG_FEEDBACK) { 392 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 393 if (ec) 394 senderr(EFAULT); 395 } 396#else
| |
397 if (flags & PFR_FLAG_FEEDBACK)
| 420 if (flags & PFR_FLAG_FEEDBACK)
|
398 if (copyout(&ad, addr+i, sizeof(ad)))
| 421 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
399 senderr(EFAULT);
| 422 senderr(EFAULT);
|
400#endif
| |
401 } 402 if (!(flags & PFR_FLAG_DUMMY)) { 403 if (flags & PFR_FLAG_ATOMIC) 404 s = splsoftnet(); 405 pfr_remove_kentries(kt, &workq); 406 if (flags & PFR_FLAG_ATOMIC) 407 splx(s); 408 } 409 if (ndel != NULL) 410 *ndel = xdel; 411 return (0); 412_bad: 413 if (flags & PFR_FLAG_FEEDBACK)
| 423 } 424 if (!(flags & PFR_FLAG_DUMMY)) { 425 if (flags & PFR_FLAG_ATOMIC) 426 s = splsoftnet(); 427 pfr_remove_kentries(kt, &workq); 428 if (flags & PFR_FLAG_ATOMIC) 429 splx(s); 430 } 431 if (ndel != NULL) 432 *ndel = xdel; 433 return (0); 434_bad: 435 if (flags & PFR_FLAG_FEEDBACK)
|
414 pfr_reset_feedback(addr, size);
| 436 pfr_reset_feedback(addr, size, flags);
|
415 return (rv); 416} 417 418int 419pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 420 int *size2, int *nadd, int *ndel, int *nchange, int flags) 421{ 422 struct pfr_ktable *kt, *tmpkt; 423 struct pfr_kentryworkq addq, delq, changeq; 424 struct pfr_kentry *p, *q; 425 struct pfr_addr ad; 426 int i, rv, s, xadd = 0, xdel = 0, xchange = 0; 427#ifdef __FreeBSD__
| 437 return (rv); 438} 439 440int 441pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 442 int *size2, int *nadd, int *ndel, int *nchange, int flags) 443{ 444 struct pfr_ktable *kt, *tmpkt; 445 struct pfr_kentryworkq addq, delq, changeq; 446 struct pfr_kentry *p, *q; 447 struct pfr_addr ad; 448 int i, rv, s, xadd = 0, xdel = 0, xchange = 0; 449#ifdef __FreeBSD__
|
428 int ec;
| |
429 /* 430 * XXX Is it OK under LP64 environments? 431 */ 432 long tzero = (long)time_second; 433#else 434 long tzero = time.tv_sec; 435#endif 436 437 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
| 450 /* 451 * XXX Is it OK under LP64 environments? 452 */ 453 long tzero = (long)time_second; 454#else 455 long tzero = time.tv_sec; 456#endif 457 458 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
|
438 if (pfr_validate_table(tbl, 0))
| 459 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
|
439 return (EINVAL); 440 kt = pfr_lookup_table(tbl); 441 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 442 return (ESRCH); 443 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 444 return (EPERM); 445 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); 446 if (tmpkt == NULL) 447 return (ENOMEM); 448 pfr_mark_addrs(kt); 449 SLIST_INIT(&addq); 450 SLIST_INIT(&delq); 451 SLIST_INIT(&changeq); 452 for (i = 0; i < size; i++) {
| 460 return (EINVAL); 461 kt = pfr_lookup_table(tbl); 462 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 463 return (ESRCH); 464 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 465 return (EPERM); 466 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); 467 if (tmpkt == NULL) 468 return (ENOMEM); 469 pfr_mark_addrs(kt); 470 SLIST_INIT(&addq); 471 SLIST_INIT(&delq); 472 SLIST_INIT(&changeq); 473 for (i = 0; i < size; i++) {
|
453#ifdef __FreeBSD__ 454 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 455 if (ec)
| 474 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
456 senderr(EFAULT);
| 475 senderr(EFAULT);
|
457#else 458 if (copyin(addr+i, &ad, sizeof(ad))) 459 senderr(EFAULT); 460#endif
| |
461 if (pfr_validate_addr(&ad)) 462 senderr(EINVAL); 463 ad.pfra_fback = PFR_FB_NONE; 464 p = pfr_lookup_addr(kt, &ad, 1); 465 if (p != NULL) { 466 if (p->pfrke_mark) { 467 ad.pfra_fback = PFR_FB_DUPLICATE; 468 goto _skip; 469 } 470 p->pfrke_mark = 1; 471 if (p->pfrke_not != ad.pfra_not) { 472 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 473 ad.pfra_fback = PFR_FB_CHANGED; 474 xchange++; 475 } 476 } else { 477 q = pfr_lookup_addr(tmpkt, &ad, 1); 478 if (q != NULL) { 479 ad.pfra_fback = PFR_FB_DUPLICATE; 480 goto _skip; 481 } 482 p = pfr_create_kentry(&ad); 483 if (p == NULL) 484 senderr(ENOMEM); 485 if (pfr_route_kentry(tmpkt, p)) { 486 pfr_destroy_kentry(p); 487 ad.pfra_fback = PFR_FB_NONE; 488 } else { 489 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 490 ad.pfra_fback = PFR_FB_ADDED; 491 xadd++; 492 } 493 } 494_skip:
| 476 if (pfr_validate_addr(&ad)) 477 senderr(EINVAL); 478 ad.pfra_fback = PFR_FB_NONE; 479 p = pfr_lookup_addr(kt, &ad, 1); 480 if (p != NULL) { 481 if (p->pfrke_mark) { 482 ad.pfra_fback = PFR_FB_DUPLICATE; 483 goto _skip; 484 } 485 p->pfrke_mark = 1; 486 if (p->pfrke_not != ad.pfra_not) { 487 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 488 ad.pfra_fback = PFR_FB_CHANGED; 489 xchange++; 490 } 491 } else { 492 q = pfr_lookup_addr(tmpkt, &ad, 1); 493 if (q != NULL) { 494 ad.pfra_fback = PFR_FB_DUPLICATE; 495 goto _skip; 496 } 497 p = pfr_create_kentry(&ad); 498 if (p == NULL) 499 senderr(ENOMEM); 500 if (pfr_route_kentry(tmpkt, p)) { 501 pfr_destroy_kentry(p); 502 ad.pfra_fback = PFR_FB_NONE; 503 } else { 504 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 505 ad.pfra_fback = PFR_FB_ADDED; 506 xadd++; 507 } 508 } 509_skip:
|
495#ifdef __FreeBSD__ 496 if (flags & PFR_FLAG_FEEDBACK) { 497 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 498 if (ec) 499 senderr(EFAULT); 500 } 501#else
| |
502 if (flags & PFR_FLAG_FEEDBACK)
| 510 if (flags & PFR_FLAG_FEEDBACK)
|
503 if (copyout(&ad, addr+i, sizeof(ad)))
| 511 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
504 senderr(EFAULT);
| 512 senderr(EFAULT);
|
505#endif
| |
506 } 507 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 508 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 509 if (*size2 < size+xdel) { 510 *size2 = size+xdel; 511 senderr(0); 512 } 513 i = 0; 514 SLIST_FOREACH(p, &delq, pfrke_workq) { 515 pfr_copyout_addr(&ad, p); 516 ad.pfra_fback = PFR_FB_DELETED;
| 513 } 514 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 515 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 516 if (*size2 < size+xdel) { 517 *size2 = size+xdel; 518 senderr(0); 519 } 520 i = 0; 521 SLIST_FOREACH(p, &delq, pfrke_workq) { 522 pfr_copyout_addr(&ad, p); 523 ad.pfra_fback = PFR_FB_DELETED;
|
517#ifdef __FreeBSD__ 518 PF_COPYOUT(&ad, addr+size+i, sizeof(ad), ec); 519 if (ec)
| 524 if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
|
520 senderr(EFAULT);
| 525 senderr(EFAULT);
|
521#else 522 if (copyout(&ad, addr+size+i, sizeof(ad))) 523 senderr(EFAULT); 524#endif
| |
525 i++; 526 } 527 } 528 pfr_clean_node_mask(tmpkt, &addq); 529 if (!(flags & PFR_FLAG_DUMMY)) { 530 if (flags & PFR_FLAG_ATOMIC) 531 s = splsoftnet(); 532 pfr_insert_kentries(kt, &addq, tzero); 533 pfr_remove_kentries(kt, &delq); 534 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 535 if (flags & PFR_FLAG_ATOMIC) 536 splx(s); 537 } else 538 pfr_destroy_kentries(&addq); 539 if (nadd != NULL) 540 *nadd = xadd; 541 if (ndel != NULL) 542 *ndel = xdel; 543 if (nchange != NULL) 544 *nchange = xchange;
| 526 i++; 527 } 528 } 529 pfr_clean_node_mask(tmpkt, &addq); 530 if (!(flags & PFR_FLAG_DUMMY)) { 531 if (flags & PFR_FLAG_ATOMIC) 532 s = splsoftnet(); 533 pfr_insert_kentries(kt, &addq, tzero); 534 pfr_remove_kentries(kt, &delq); 535 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 536 if (flags & PFR_FLAG_ATOMIC) 537 splx(s); 538 } else 539 pfr_destroy_kentries(&addq); 540 if (nadd != NULL) 541 *nadd = xadd; 542 if (ndel != NULL) 543 *ndel = xdel; 544 if (nchange != NULL) 545 *nchange = xchange;
|
545 if ((flags & PFR_FLAG_FEEDBACK) && *size2)
| 546 if ((flags & PFR_FLAG_FEEDBACK) && size2)
|
546 *size2 = size+xdel; 547 pfr_destroy_ktable(tmpkt, 0); 548 return (0); 549_bad: 550 pfr_clean_node_mask(tmpkt, &addq); 551 pfr_destroy_kentries(&addq); 552 if (flags & PFR_FLAG_FEEDBACK)
| 547 *size2 = size+xdel; 548 pfr_destroy_ktable(tmpkt, 0); 549 return (0); 550_bad: 551 pfr_clean_node_mask(tmpkt, &addq); 552 pfr_destroy_kentries(&addq); 553 if (flags & PFR_FLAG_FEEDBACK)
|
553 pfr_reset_feedback(addr, size);
| 554 pfr_reset_feedback(addr, size, flags);
|
554 pfr_destroy_ktable(tmpkt, 0); 555 return (rv); 556} 557 558int 559pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 560 int *nmatch, int flags) 561{ 562 struct pfr_ktable *kt; 563 struct pfr_kentry *p; 564 struct pfr_addr ad; 565 int i, xmatch = 0;
| 555 pfr_destroy_ktable(tmpkt, 0); 556 return (rv); 557} 558 559int 560pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 561 int *nmatch, int flags) 562{ 563 struct pfr_ktable *kt; 564 struct pfr_kentry *p; 565 struct pfr_addr ad; 566 int i, xmatch = 0;
|
566#ifdef __FreeBSD__ 567 int ec; 568#endif
| |
569 570 ACCEPT_FLAGS(PFR_FLAG_REPLACE);
| 567 568 ACCEPT_FLAGS(PFR_FLAG_REPLACE);
|
571 if (pfr_validate_table(tbl, 0))
| 569 if (pfr_validate_table(tbl, 0, 0))
|
572 return (EINVAL); 573 kt = pfr_lookup_table(tbl); 574 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 575 return (ESRCH); 576 577 for (i = 0; i < size; i++) {
| 570 return (EINVAL); 571 kt = pfr_lookup_table(tbl); 572 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 573 return (ESRCH); 574 575 for (i = 0; i < size; i++) {
|
578#ifdef __FreeBSD__ 579 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 580 if (ec)
| 576 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
581 return (EFAULT);
| 577 return (EFAULT);
|
582#else 583 if (copyin(addr+i, &ad, sizeof(ad))) 584 return (EFAULT); 585#endif
| |
586 if (pfr_validate_addr(&ad)) 587 return (EINVAL); 588 if (ADDR_NETWORK(&ad)) 589 return (EINVAL); 590 p = pfr_lookup_addr(kt, &ad, 0); 591 if (flags & PFR_FLAG_REPLACE) 592 pfr_copyout_addr(&ad, p); 593 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE : 594 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 595 if (p != NULL && !p->pfrke_not) 596 xmatch++;
| 578 if (pfr_validate_addr(&ad)) 579 return (EINVAL); 580 if (ADDR_NETWORK(&ad)) 581 return (EINVAL); 582 p = pfr_lookup_addr(kt, &ad, 0); 583 if (flags & PFR_FLAG_REPLACE) 584 pfr_copyout_addr(&ad, p); 585 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE : 586 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 587 if (p != NULL && !p->pfrke_not) 588 xmatch++;
|
597#ifdef __FreeBSD__ 598 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 599 if (ec)
| 589 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
600 return (EFAULT);
| 590 return (EFAULT);
|
601#else 602 if (copyout(&ad, addr+i, sizeof(ad))) 603 return (EFAULT); 604#endif
| |
605 } 606 if (nmatch != NULL) 607 *nmatch = xmatch; 608 return (0); 609} 610 611int 612pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 613 int flags) 614{ 615 struct pfr_ktable *kt; 616 struct pfr_walktree w; 617 int rv; 618 619 ACCEPT_FLAGS(0);
| 591 } 592 if (nmatch != NULL) 593 *nmatch = xmatch; 594 return (0); 595} 596 597int 598pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 599 int flags) 600{ 601 struct pfr_ktable *kt; 602 struct pfr_walktree w; 603 int rv; 604 605 ACCEPT_FLAGS(0);
|
620 if (pfr_validate_table(tbl, 0))
| 606 if (pfr_validate_table(tbl, 0, 0))
|
621 return (EINVAL); 622 kt = pfr_lookup_table(tbl); 623 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 624 return (ESRCH); 625 if (kt->pfrkt_cnt > *size) { 626 *size = kt->pfrkt_cnt; 627 return (0); 628 } 629 630 bzero(&w, sizeof(w)); 631 w.pfrw_op = PFRW_GET_ADDRS; 632 w.pfrw_addr = addr; 633 w.pfrw_free = kt->pfrkt_cnt;
| 607 return (EINVAL); 608 kt = pfr_lookup_table(tbl); 609 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 610 return (ESRCH); 611 if (kt->pfrkt_cnt > *size) { 612 *size = kt->pfrkt_cnt; 613 return (0); 614 } 615 616 bzero(&w, sizeof(w)); 617 w.pfrw_op = PFRW_GET_ADDRS; 618 w.pfrw_addr = addr; 619 w.pfrw_free = kt->pfrkt_cnt;
|
| 620 w.pfrw_flags = flags;
|
634#ifdef __FreeBSD__ 635 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 636#else 637 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 638#endif 639 if (!rv) 640#ifdef __FreeBSD__ 641 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 642 &w); 643#else 644 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 645#endif 646 if (rv) 647 return (rv); 648 649 if (w.pfrw_free) { 650 printf("pfr_get_addrs: corruption detected (%d).\n", 651 w.pfrw_free); 652 return (ENOTTY); 653 } 654 *size = kt->pfrkt_cnt; 655 return (0); 656} 657 658int 659pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 660 int flags) 661{ 662 struct pfr_ktable *kt; 663 struct pfr_walktree w; 664 struct pfr_kentryworkq workq; 665 int rv, s; 666#ifdef __FreeBSD__ 667 /* 668 * XXX Is it OK under LP64 environments? 669 */ 670 long tzero = (long)time_second; 671#else 672 long tzero = time.tv_sec; 673#endif 674 675 ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
| 621#ifdef __FreeBSD__ 622 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 623#else 624 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 625#endif 626 if (!rv) 627#ifdef __FreeBSD__ 628 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 629 &w); 630#else 631 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 632#endif 633 if (rv) 634 return (rv); 635 636 if (w.pfrw_free) { 637 printf("pfr_get_addrs: corruption detected (%d).\n", 638 w.pfrw_free); 639 return (ENOTTY); 640 } 641 *size = kt->pfrkt_cnt; 642 return (0); 643} 644 645int 646pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 647 int flags) 648{ 649 struct pfr_ktable *kt; 650 struct pfr_walktree w; 651 struct pfr_kentryworkq workq; 652 int rv, s; 653#ifdef __FreeBSD__ 654 /* 655 * XXX Is it OK under LP64 environments? 656 */ 657 long tzero = (long)time_second; 658#else 659 long tzero = time.tv_sec; 660#endif 661 662 ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
|
676 if (pfr_validate_table(tbl, 0))
| 663 if (pfr_validate_table(tbl, 0, 0))
|
677 return (EINVAL); 678 kt = pfr_lookup_table(tbl); 679 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 680 return (ESRCH); 681 if (kt->pfrkt_cnt > *size) { 682 *size = kt->pfrkt_cnt; 683 return (0); 684 } 685 686 bzero(&w, sizeof(w)); 687 w.pfrw_op = PFRW_GET_ASTATS; 688 w.pfrw_astats = addr; 689 w.pfrw_free = kt->pfrkt_cnt;
| 664 return (EINVAL); 665 kt = pfr_lookup_table(tbl); 666 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 667 return (ESRCH); 668 if (kt->pfrkt_cnt > *size) { 669 *size = kt->pfrkt_cnt; 670 return (0); 671 } 672 673 bzero(&w, sizeof(w)); 674 w.pfrw_op = PFRW_GET_ASTATS; 675 w.pfrw_astats = addr; 676 w.pfrw_free = kt->pfrkt_cnt;
|
| 677 w.pfrw_flags = flags;
|
690 if (flags & PFR_FLAG_ATOMIC) 691 s = splsoftnet(); 692#ifdef __FreeBSD__ 693 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 694#else 695 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 696#endif 697 if (!rv) 698#ifdef __FreeBSD__ 699 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 700 &w); 701#else 702 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 703#endif 704 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 705 pfr_enqueue_addrs(kt, &workq, NULL, 0); 706 pfr_clstats_kentries(&workq, tzero, 0); 707 } 708 if (flags & PFR_FLAG_ATOMIC) 709 splx(s); 710 if (rv) 711 return (rv); 712 713 if (w.pfrw_free) { 714 printf("pfr_get_astats: corruption detected (%d).\n", 715 w.pfrw_free); 716 return (ENOTTY); 717 } 718 *size = kt->pfrkt_cnt; 719 return (0); 720} 721 722int 723pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 724 int *nzero, int flags) 725{ 726 struct pfr_ktable *kt; 727 struct pfr_kentryworkq workq; 728 struct pfr_kentry *p; 729 struct pfr_addr ad; 730 int i, rv, s, xzero = 0;
| 678 if (flags & PFR_FLAG_ATOMIC) 679 s = splsoftnet(); 680#ifdef __FreeBSD__ 681 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 682#else 683 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 684#endif 685 if (!rv) 686#ifdef __FreeBSD__ 687 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 688 &w); 689#else 690 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 691#endif 692 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 693 pfr_enqueue_addrs(kt, &workq, NULL, 0); 694 pfr_clstats_kentries(&workq, tzero, 0); 695 } 696 if (flags & PFR_FLAG_ATOMIC) 697 splx(s); 698 if (rv) 699 return (rv); 700 701 if (w.pfrw_free) { 702 printf("pfr_get_astats: corruption detected (%d).\n", 703 w.pfrw_free); 704 return (ENOTTY); 705 } 706 *size = kt->pfrkt_cnt; 707 return (0); 708} 709 710int 711pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 712 int *nzero, int flags) 713{ 714 struct pfr_ktable *kt; 715 struct pfr_kentryworkq workq; 716 struct pfr_kentry *p; 717 struct pfr_addr ad; 718 int i, rv, s, xzero = 0;
|
731#ifdef __FreeBSD__ 732 int ec; 733#endif
| |
734 735 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
| 719 720 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
|
736 if (pfr_validate_table(tbl, 0))
| 721 if (pfr_validate_table(tbl, 0, 0))
|
737 return (EINVAL); 738 kt = pfr_lookup_table(tbl); 739 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 740 return (ESRCH); 741 SLIST_INIT(&workq); 742 for (i = 0; i < size; i++) {
| 722 return (EINVAL); 723 kt = pfr_lookup_table(tbl); 724 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 725 return (ESRCH); 726 SLIST_INIT(&workq); 727 for (i = 0; i < size; i++) {
|
743#ifdef __FreeBSD__ 744 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 745 if (ec)
| 728 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
746 senderr(EFAULT);
| 729 senderr(EFAULT);
|
747#else 748 if (copyin(addr+i, &ad, sizeof(ad))) 749 senderr(EFAULT); 750#endif
| |
751 if (pfr_validate_addr(&ad)) 752 senderr(EINVAL); 753 p = pfr_lookup_addr(kt, &ad, 1); 754 if (flags & PFR_FLAG_FEEDBACK) { 755 ad.pfra_fback = (p != NULL) ? 756 PFR_FB_CLEARED : PFR_FB_NONE;
| 730 if (pfr_validate_addr(&ad)) 731 senderr(EINVAL); 732 p = pfr_lookup_addr(kt, &ad, 1); 733 if (flags & PFR_FLAG_FEEDBACK) { 734 ad.pfra_fback = (p != NULL) ? 735 PFR_FB_CLEARED : PFR_FB_NONE;
|
757#ifdef __FreeBSD__ 758 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 759 if (ec)
| 736 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
760 senderr(EFAULT);
| 737 senderr(EFAULT);
|
761#else 762 if (copyout(&ad, addr+i, sizeof(ad))) 763 senderr(EFAULT); 764#endif
| |
765 } 766 if (p != NULL) { 767 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 768 xzero++; 769 } 770 } 771 772 if (!(flags & PFR_FLAG_DUMMY)) { 773 if (flags & PFR_FLAG_ATOMIC) 774 s = splsoftnet(); 775 pfr_clstats_kentries(&workq, 0, 0); 776 if (flags & PFR_FLAG_ATOMIC) 777 splx(s); 778 } 779 if (nzero != NULL) 780 *nzero = xzero; 781 return (0); 782_bad: 783 if (flags & PFR_FLAG_FEEDBACK)
| 738 } 739 if (p != NULL) { 740 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 741 xzero++; 742 } 743 } 744 745 if (!(flags & PFR_FLAG_DUMMY)) { 746 if (flags & PFR_FLAG_ATOMIC) 747 s = splsoftnet(); 748 pfr_clstats_kentries(&workq, 0, 0); 749 if (flags & PFR_FLAG_ATOMIC) 750 splx(s); 751 } 752 if (nzero != NULL) 753 *nzero = xzero; 754 return (0); 755_bad: 756 if (flags & PFR_FLAG_FEEDBACK)
|
784 pfr_reset_feedback(addr, size);
| 757 pfr_reset_feedback(addr, size, flags);
|
785 return (rv); 786} 787 788int 789pfr_validate_addr(struct pfr_addr *ad) 790{ 791 int i; 792 793 switch (ad->pfra_af) { 794 case AF_INET: 795 if (ad->pfra_net > 32) 796 return (-1); 797 break; 798 case AF_INET6: 799 if (ad->pfra_net > 128) 800 return (-1); 801 break; 802 default: 803 return (-1); 804 } 805 if (ad->pfra_net < 128 && 806 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 807 return (-1); 808 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 809 if (((caddr_t)ad)[i]) 810 return (-1); 811 if (ad->pfra_not && ad->pfra_not != 1) 812 return (-1); 813 if (ad->pfra_fback) 814 return (-1); 815 return (0); 816} 817 818void 819pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 820 int *naddr, int sweep) 821{ 822 struct pfr_walktree w; 823 824 SLIST_INIT(workq); 825 bzero(&w, sizeof(w)); 826 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 827 w.pfrw_workq = workq; 828 if (kt->pfrkt_ip4 != NULL) 829#ifdef __FreeBSD__ 830 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, 831 &w)) 832#else 833 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 834#endif 835 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 836 if (kt->pfrkt_ip6 != NULL) 837#ifdef __FreeBSD__ 838 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 839 &w)) 840#else 841 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) 842#endif 843 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 844 if (naddr != NULL) 845 *naddr = w.pfrw_cnt; 846} 847 848void 849pfr_mark_addrs(struct pfr_ktable *kt) 850{ 851 struct pfr_walktree w; 852 853 bzero(&w, sizeof(w)); 854 w.pfrw_op = PFRW_MARK; 855#ifdef __FreeBSD__ 856 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 857#else 858 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 859#endif 860 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 861#ifdef __FreeBSD__ 862 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) 863#else 864 if (rn_walktree(kt->pfrkt_ip6, 
pfr_walktree, &w)) 865#endif 866 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 867} 868 869 870struct pfr_kentry * 871pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 872{ 873 union sockaddr_union sa, mask; 874 struct radix_node_head *head; 875 struct pfr_kentry *ke; 876 int s; 877 878 bzero(&sa, sizeof(sa)); 879 if (ad->pfra_af == AF_INET) { 880 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 881 head = kt->pfrkt_ip4; 882 } else { 883 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 884 head = kt->pfrkt_ip6; 885 } 886 if (ADDR_NETWORK(ad)) { 887 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 888 s = splsoftnet(); /* rn_lookup makes use of globals */ 889#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 890 RADIX_NODE_HEAD_LOCK(head); 891#endif 892 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 893#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 894 RADIX_NODE_HEAD_UNLOCK(head); 895#endif 896 splx(s); 897 if (ke && KENTRY_RNF_ROOT(ke)) 898 ke = NULL; 899 } else { 900 ke = (struct pfr_kentry *)rn_match(&sa, head); 901 if (ke && KENTRY_RNF_ROOT(ke)) 902 ke = NULL; 903 if (exact && ke && KENTRY_NETWORK(ke)) 904 ke = NULL; 905 } 906 return (ke); 907} 908 909struct pfr_kentry * 910pfr_create_kentry(struct pfr_addr *ad) 911{ 912 struct pfr_kentry *ke; 913 914 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT); 915 if (ke == NULL) 916 return (NULL); 917 bzero(ke, sizeof(*ke)); 918 919 if (ad->pfra_af == AF_INET) 920 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 921 else 922 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 923 ke->pfrke_af = ad->pfra_af; 924 ke->pfrke_net = ad->pfra_net; 925 ke->pfrke_not = ad->pfra_not; 926 return (ke); 927} 928 929void 930pfr_destroy_kentries(struct pfr_kentryworkq *workq) 931{ 932 struct pfr_kentry *p, *q; 933 934 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 935 q = SLIST_NEXT(p, pfrke_workq); 936 pfr_destroy_kentry(p); 937 } 938} 939 940void 941pfr_destroy_kentry(struct pfr_kentry *ke) 942{ 943 
pool_put(&pfr_kentry_pl, ke); 944} 945 946void 947pfr_insert_kentries(struct pfr_ktable *kt, 948 struct pfr_kentryworkq *workq, long tzero) 949{ 950 struct pfr_kentry *p; 951 int rv, n = 0; 952 953 SLIST_FOREACH(p, workq, pfrke_workq) { 954 rv = pfr_route_kentry(kt, p); 955 if (rv) { 956 printf("pfr_insert_kentries: cannot route entry " 957 "(code=%d).\n", rv); 958 break; 959 } 960 p->pfrke_tzero = tzero; 961 n++; 962 } 963 kt->pfrkt_cnt += n; 964} 965 966void 967pfr_remove_kentries(struct pfr_ktable *kt, 968 struct pfr_kentryworkq *workq) 969{ 970 struct pfr_kentry *p; 971 int n = 0; 972 973 SLIST_FOREACH(p, workq, pfrke_workq) { 974 pfr_unroute_kentry(kt, p); 975 n++; 976 } 977 kt->pfrkt_cnt -= n; 978 pfr_destroy_kentries(workq); 979} 980 981void 982pfr_clean_node_mask(struct pfr_ktable *kt, 983 struct pfr_kentryworkq *workq) 984{
| 758 return (rv); 759} 760 761int 762pfr_validate_addr(struct pfr_addr *ad) 763{ 764 int i; 765 766 switch (ad->pfra_af) { 767 case AF_INET: 768 if (ad->pfra_net > 32) 769 return (-1); 770 break; 771 case AF_INET6: 772 if (ad->pfra_net > 128) 773 return (-1); 774 break; 775 default: 776 return (-1); 777 } 778 if (ad->pfra_net < 128 && 779 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 780 return (-1); 781 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 782 if (((caddr_t)ad)[i]) 783 return (-1); 784 if (ad->pfra_not && ad->pfra_not != 1) 785 return (-1); 786 if (ad->pfra_fback) 787 return (-1); 788 return (0); 789} 790 791void 792pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 793 int *naddr, int sweep) 794{ 795 struct pfr_walktree w; 796 797 SLIST_INIT(workq); 798 bzero(&w, sizeof(w)); 799 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 800 w.pfrw_workq = workq; 801 if (kt->pfrkt_ip4 != NULL) 802#ifdef __FreeBSD__ 803 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, 804 &w)) 805#else 806 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 807#endif 808 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 809 if (kt->pfrkt_ip6 != NULL) 810#ifdef __FreeBSD__ 811 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 812 &w)) 813#else 814 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) 815#endif 816 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 817 if (naddr != NULL) 818 *naddr = w.pfrw_cnt; 819} 820 821void 822pfr_mark_addrs(struct pfr_ktable *kt) 823{ 824 struct pfr_walktree w; 825 826 bzero(&w, sizeof(w)); 827 w.pfrw_op = PFRW_MARK; 828#ifdef __FreeBSD__ 829 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 830#else 831 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) 832#endif 833 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 834#ifdef __FreeBSD__ 835 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) 836#else 837 if (rn_walktree(kt->pfrkt_ip6, 
pfr_walktree, &w)) 838#endif 839 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 840} 841 842 843struct pfr_kentry * 844pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 845{ 846 union sockaddr_union sa, mask; 847 struct radix_node_head *head; 848 struct pfr_kentry *ke; 849 int s; 850 851 bzero(&sa, sizeof(sa)); 852 if (ad->pfra_af == AF_INET) { 853 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 854 head = kt->pfrkt_ip4; 855 } else { 856 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 857 head = kt->pfrkt_ip6; 858 } 859 if (ADDR_NETWORK(ad)) { 860 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 861 s = splsoftnet(); /* rn_lookup makes use of globals */ 862#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 863 RADIX_NODE_HEAD_LOCK(head); 864#endif 865 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 866#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 867 RADIX_NODE_HEAD_UNLOCK(head); 868#endif 869 splx(s); 870 if (ke && KENTRY_RNF_ROOT(ke)) 871 ke = NULL; 872 } else { 873 ke = (struct pfr_kentry *)rn_match(&sa, head); 874 if (ke && KENTRY_RNF_ROOT(ke)) 875 ke = NULL; 876 if (exact && ke && KENTRY_NETWORK(ke)) 877 ke = NULL; 878 } 879 return (ke); 880} 881 882struct pfr_kentry * 883pfr_create_kentry(struct pfr_addr *ad) 884{ 885 struct pfr_kentry *ke; 886 887 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT); 888 if (ke == NULL) 889 return (NULL); 890 bzero(ke, sizeof(*ke)); 891 892 if (ad->pfra_af == AF_INET) 893 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 894 else 895 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 896 ke->pfrke_af = ad->pfra_af; 897 ke->pfrke_net = ad->pfra_net; 898 ke->pfrke_not = ad->pfra_not; 899 return (ke); 900} 901 902void 903pfr_destroy_kentries(struct pfr_kentryworkq *workq) 904{ 905 struct pfr_kentry *p, *q; 906 907 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 908 q = SLIST_NEXT(p, pfrke_workq); 909 pfr_destroy_kentry(p); 910 } 911} 912 913void 914pfr_destroy_kentry(struct pfr_kentry *ke) 915{ 916 
pool_put(&pfr_kentry_pl, ke); 917} 918 919void 920pfr_insert_kentries(struct pfr_ktable *kt, 921 struct pfr_kentryworkq *workq, long tzero) 922{ 923 struct pfr_kentry *p; 924 int rv, n = 0; 925 926 SLIST_FOREACH(p, workq, pfrke_workq) { 927 rv = pfr_route_kentry(kt, p); 928 if (rv) { 929 printf("pfr_insert_kentries: cannot route entry " 930 "(code=%d).\n", rv); 931 break; 932 } 933 p->pfrke_tzero = tzero; 934 n++; 935 } 936 kt->pfrkt_cnt += n; 937} 938 939void 940pfr_remove_kentries(struct pfr_ktable *kt, 941 struct pfr_kentryworkq *workq) 942{ 943 struct pfr_kentry *p; 944 int n = 0; 945 946 SLIST_FOREACH(p, workq, pfrke_workq) { 947 pfr_unroute_kentry(kt, p); 948 n++; 949 } 950 kt->pfrkt_cnt -= n; 951 pfr_destroy_kentries(workq); 952} 953 954void 955pfr_clean_node_mask(struct pfr_ktable *kt, 956 struct pfr_kentryworkq *workq) 957{
|
985 struct pfr_kentry *p;
| 958 struct pfr_kentry *p;
|
986
| 959
|
987 SLIST_FOREACH(p, workq, pfrke_workq) 988 pfr_unroute_kentry(kt, p);
| 960 SLIST_FOREACH(p, workq, pfrke_workq) 961 pfr_unroute_kentry(kt, p);
|
989} 990 991void 992pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange) 993{ 994 struct pfr_kentry *p; 995 int s; 996 997 SLIST_FOREACH(p, workq, pfrke_workq) { 998 s = splsoftnet(); 999 if (negchange) 1000 p->pfrke_not = !p->pfrke_not; 1001 bzero(p->pfrke_packets, sizeof(p->pfrke_packets)); 1002 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes)); 1003 splx(s); 1004 p->pfrke_tzero = tzero; 1005 } 1006} 1007 1008void
| 962} 963 964void 965pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange) 966{ 967 struct pfr_kentry *p; 968 int s; 969 970 SLIST_FOREACH(p, workq, pfrke_workq) { 971 s = splsoftnet(); 972 if (negchange) 973 p->pfrke_not = !p->pfrke_not; 974 bzero(p->pfrke_packets, sizeof(p->pfrke_packets)); 975 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes)); 976 splx(s); 977 p->pfrke_tzero = tzero; 978 } 979} 980 981void
|
1009pfr_reset_feedback(struct pfr_addr *addr, int size)
| 982pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
|
1010{ 1011 struct pfr_addr ad; 1012 int i;
| 983{ 984 struct pfr_addr ad; 985 int i;
|
1013#ifdef __FreeBSD__ 1014 int ec; 1015#endif
| |
1016 1017 for (i = 0; i < size; i++) {
| 986 987 for (i = 0; i < size; i++) {
|
1018#ifdef __FreeBSD__ 1019 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 1020 if (ec)
| 988 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
1021 break;
| 989 break;
|
1022#else 1023 if (copyin(addr+i, &ad, sizeof(ad))) 1024 break; 1025#endif
| |
1026 ad.pfra_fback = PFR_FB_NONE;
| 990 ad.pfra_fback = PFR_FB_NONE;
|
1027#ifdef __FreeBSD__ 1028 PF_COPYOUT(&ad, addr+i, sizeof(ad), ec); 1029 if (ec)
| 991 if (COPYOUT(&ad, addr+i, sizeof(ad)))
|
1030 break;
| 992 break;
|
1031#else 1032 if (copyout(&ad, addr+i, sizeof(ad))) 1033 break; 1034#endif
| |
1035 } 1036} 1037 1038void 1039pfr_prepare_network(union sockaddr_union *sa, int af, int net) 1040{ 1041 int i; 1042 1043 bzero(sa, sizeof(*sa)); 1044 if (af == AF_INET) { 1045 sa->sin.sin_len = sizeof(sa->sin); 1046 sa->sin.sin_family = AF_INET; 1047 sa->sin.sin_addr.s_addr = htonl(-1 << (32-net)); 1048 } else { 1049 sa->sin6.sin6_len = sizeof(sa->sin6); 1050 sa->sin6.sin6_family = AF_INET6; 1051 for (i = 0; i < 4; i++) { 1052 if (net <= 32) { 1053 sa->sin6.sin6_addr.s6_addr32[i] = 1054 htonl(-1 << (32-net)); 1055 break; 1056 } 1057 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 1058 net -= 32; 1059 } 1060 } 1061} 1062 1063int 1064pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 1065{ 1066 union sockaddr_union mask; 1067 struct radix_node *rn; 1068 struct radix_node_head *head; 1069 int s; 1070 1071 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 1072 if (ke->pfrke_af == AF_INET) 1073 head = kt->pfrkt_ip4; 1074 else 1075 head = kt->pfrkt_ip6; 1076 1077 s = splsoftnet(); 1078#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1079 RADIX_NODE_HEAD_LOCK(head); 1080#endif 1081 if (KENTRY_NETWORK(ke)) { 1082 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 1083 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 1084 } else 1085 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 1086#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1087 RADIX_NODE_HEAD_UNLOCK(head); 1088#endif 1089 splx(s); 1090 1091 return (rn == NULL ? 
-1 : 0); 1092} 1093 1094int 1095pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 1096{ 1097 union sockaddr_union mask; 1098 struct radix_node *rn; 1099 struct radix_node_head *head; 1100 int s; 1101 1102 if (ke->pfrke_af == AF_INET) 1103 head = kt->pfrkt_ip4; 1104 else 1105 head = kt->pfrkt_ip6; 1106 1107 s = splsoftnet(); 1108#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1109 RADIX_NODE_HEAD_LOCK(head); 1110#endif 1111 if (KENTRY_NETWORK(ke)) { 1112 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 1113 rn = rn_delete(&ke->pfrke_sa, &mask, head); 1114 } else 1115 rn = rn_delete(&ke->pfrke_sa, NULL, head); 1116#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1117 RADIX_NODE_HEAD_UNLOCK(head); 1118#endif 1119 splx(s); 1120 1121 if (rn == NULL) { 1122 printf("pfr_unroute_kentry: delete failed.\n"); 1123 return (-1); 1124 } 1125 return (0); 1126} 1127 1128void 1129pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) 1130{ 1131 bzero(ad, sizeof(*ad)); 1132 if (ke == NULL) 1133 return; 1134 ad->pfra_af = ke->pfrke_af; 1135 ad->pfra_net = ke->pfrke_net; 1136 ad->pfra_not = ke->pfrke_not; 1137 if (ad->pfra_af == AF_INET) 1138 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1139 else 1140 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1141} 1142 1143int 1144pfr_walktree(struct radix_node *rn, void *arg) 1145{ 1146 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1147 struct pfr_walktree *w = arg;
| 993 } 994} 995 996void 997pfr_prepare_network(union sockaddr_union *sa, int af, int net) 998{ 999 int i; 1000 1001 bzero(sa, sizeof(*sa)); 1002 if (af == AF_INET) { 1003 sa->sin.sin_len = sizeof(sa->sin); 1004 sa->sin.sin_family = AF_INET; 1005 sa->sin.sin_addr.s_addr = htonl(-1 << (32-net)); 1006 } else { 1007 sa->sin6.sin6_len = sizeof(sa->sin6); 1008 sa->sin6.sin6_family = AF_INET6; 1009 for (i = 0; i < 4; i++) { 1010 if (net <= 32) { 1011 sa->sin6.sin6_addr.s6_addr32[i] = 1012 htonl(-1 << (32-net)); 1013 break; 1014 } 1015 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 1016 net -= 32; 1017 } 1018 } 1019} 1020 1021int 1022pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 1023{ 1024 union sockaddr_union mask; 1025 struct radix_node *rn; 1026 struct radix_node_head *head; 1027 int s; 1028 1029 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 1030 if (ke->pfrke_af == AF_INET) 1031 head = kt->pfrkt_ip4; 1032 else 1033 head = kt->pfrkt_ip6; 1034 1035 s = splsoftnet(); 1036#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1037 RADIX_NODE_HEAD_LOCK(head); 1038#endif 1039 if (KENTRY_NETWORK(ke)) { 1040 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 1041 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 1042 } else 1043 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 1044#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1045 RADIX_NODE_HEAD_UNLOCK(head); 1046#endif 1047 splx(s); 1048 1049 return (rn == NULL ? 
-1 : 0); 1050} 1051 1052int 1053pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 1054{ 1055 union sockaddr_union mask; 1056 struct radix_node *rn; 1057 struct radix_node_head *head; 1058 int s; 1059 1060 if (ke->pfrke_af == AF_INET) 1061 head = kt->pfrkt_ip4; 1062 else 1063 head = kt->pfrkt_ip6; 1064 1065 s = splsoftnet(); 1066#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1067 RADIX_NODE_HEAD_LOCK(head); 1068#endif 1069 if (KENTRY_NETWORK(ke)) { 1070 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 1071 rn = rn_delete(&ke->pfrke_sa, &mask, head); 1072 } else 1073 rn = rn_delete(&ke->pfrke_sa, NULL, head); 1074#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 1075 RADIX_NODE_HEAD_UNLOCK(head); 1076#endif 1077 splx(s); 1078 1079 if (rn == NULL) { 1080 printf("pfr_unroute_kentry: delete failed.\n"); 1081 return (-1); 1082 } 1083 return (0); 1084} 1085 1086void 1087pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) 1088{ 1089 bzero(ad, sizeof(*ad)); 1090 if (ke == NULL) 1091 return; 1092 ad->pfra_af = ke->pfrke_af; 1093 ad->pfra_net = ke->pfrke_net; 1094 ad->pfra_not = ke->pfrke_not; 1095 if (ad->pfra_af == AF_INET) 1096 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1097 else 1098 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1099} 1100 1101int 1102pfr_walktree(struct radix_node *rn, void *arg) 1103{ 1104 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1105 struct pfr_walktree *w = arg;
|
1148 int s; 1149#ifdef __FreeBSD__ 1150 int ec; 1151#endif
| 1106 int s, flags = w->pfrw_flags;
|
1152 1153 switch (w->pfrw_op) { 1154 case PFRW_MARK: 1155 ke->pfrke_mark = 0; 1156 break; 1157 case PFRW_SWEEP: 1158 if (ke->pfrke_mark) 1159 break;
| 1107 1108 switch (w->pfrw_op) { 1109 case PFRW_MARK: 1110 ke->pfrke_mark = 0; 1111 break; 1112 case PFRW_SWEEP: 1113 if (ke->pfrke_mark) 1114 break;
|
1160 /* fall trough */
| 1115 /* FALLTHROUGH */
|
1161 case PFRW_ENQUEUE: 1162 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1163 w->pfrw_cnt++; 1164 break; 1165 case PFRW_GET_ADDRS: 1166 if (w->pfrw_free-- > 0) { 1167 struct pfr_addr ad; 1168 1169 pfr_copyout_addr(&ad, ke);
| 1116 case PFRW_ENQUEUE: 1117 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1118 w->pfrw_cnt++; 1119 break; 1120 case PFRW_GET_ADDRS: 1121 if (w->pfrw_free-- > 0) { 1122 struct pfr_addr ad; 1123 1124 pfr_copyout_addr(&ad, ke);
|
1170#ifdef __FreeBSD__ 1171 PF_COPYOUT(&ad, w->pfrw_addr, sizeof(ad), ec); 1172 if (ec) 1173 return (EFAULT); 1174#else
| |
1175 if (copyout(&ad, w->pfrw_addr, sizeof(ad))) 1176 return (EFAULT);
| 1125 if (copyout(&ad, w->pfrw_addr, sizeof(ad))) 1126 return (EFAULT);
|
1177#endif
| |
1178 w->pfrw_addr++; 1179 } 1180 break; 1181 case PFRW_GET_ASTATS: 1182 if (w->pfrw_free-- > 0) { 1183 struct pfr_astats as; 1184 1185 pfr_copyout_addr(&as.pfras_a, ke); 1186 1187 s = splsoftnet(); 1188 bcopy(ke->pfrke_packets, as.pfras_packets, 1189 sizeof(as.pfras_packets)); 1190 bcopy(ke->pfrke_bytes, as.pfras_bytes, 1191 sizeof(as.pfras_bytes)); 1192 splx(s); 1193 as.pfras_tzero = ke->pfrke_tzero; 1194
| 1127 w->pfrw_addr++; 1128 } 1129 break; 1130 case PFRW_GET_ASTATS: 1131 if (w->pfrw_free-- > 0) { 1132 struct pfr_astats as; 1133 1134 pfr_copyout_addr(&as.pfras_a, ke); 1135 1136 s = splsoftnet(); 1137 bcopy(ke->pfrke_packets, as.pfras_packets, 1138 sizeof(as.pfras_packets)); 1139 bcopy(ke->pfrke_bytes, as.pfras_bytes, 1140 sizeof(as.pfras_bytes)); 1141 splx(s); 1142 as.pfras_tzero = ke->pfrke_tzero; 1143
|
1195#ifdef __FreeBSD__ 1196 PF_COPYOUT(&as, w->pfrw_astats, sizeof(as), ec); 1197 if (ec)
| 1144 if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
|
1198 return (EFAULT);
| 1145 return (EFAULT);
|
1199#else 1200 if (copyout(&as, w->pfrw_astats, sizeof(as))) 1201 return (EFAULT); 1202#endif
| |
1203 w->pfrw_astats++; 1204 } 1205 break; 1206 case PFRW_POOL_GET: 1207 if (ke->pfrke_not) 1208 break; /* negative entries are ignored */ 1209 if (!w->pfrw_cnt--) { 1210 w->pfrw_kentry = ke; 1211 return (1); /* finish search */ 1212 } 1213 break;
| 1146 w->pfrw_astats++; 1147 } 1148 break; 1149 case PFRW_POOL_GET: 1150 if (ke->pfrke_not) 1151 break; /* negative entries are ignored */ 1152 if (!w->pfrw_cnt--) { 1153 w->pfrw_kentry = ke; 1154 return (1); /* finish search */ 1155 } 1156 break;
|
| 1157 case PFRW_DYNADDR_UPDATE: 1158 if (ke->pfrke_af == AF_INET) { 1159 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1160 break; 1161 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1162 w->pfrw_dyn->pfid_addr4 = *SUNION2PF( 1163 &ke->pfrke_sa, AF_INET); 1164 w->pfrw_dyn->pfid_mask4 = *SUNION2PF( 1165 &pfr_mask, AF_INET); 1166 } else { 1167 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1168 break; 1169 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1170 w->pfrw_dyn->pfid_addr6 = *SUNION2PF( 1171 &ke->pfrke_sa, AF_INET6); 1172 w->pfrw_dyn->pfid_mask6 = *SUNION2PF( 1173 &pfr_mask, AF_INET6); 1174 } 1175 break;
|
1214 } 1215 return (0); 1216} 1217 1218int 1219pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1220{ 1221 struct pfr_ktableworkq workq; 1222 struct pfr_ktable *p; 1223 int s, xdel = 0; 1224 1225 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS); 1226 if (pfr_table_count(filter, flags) < 0) 1227 return (ENOENT); 1228 1229 SLIST_INIT(&workq); 1230 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1231 if (pfr_skip_table(filter, p, flags)) 1232 continue;
| 1176 } 1177 return (0); 1178} 1179 1180int 1181pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1182{ 1183 struct pfr_ktableworkq workq; 1184 struct pfr_ktable *p; 1185 int s, xdel = 0; 1186 1187 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS); 1188 if (pfr_table_count(filter, flags) < 0) 1189 return (ENOENT); 1190 1191 SLIST_INIT(&workq); 1192 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1193 if (pfr_skip_table(filter, p, flags)) 1194 continue;
|
| 1195 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1196 continue;
|
1233 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1234 continue; 1235 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1236 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1237 xdel++; 1238 } 1239 if (!(flags & PFR_FLAG_DUMMY)) { 1240 if (flags & PFR_FLAG_ATOMIC) 1241 s = splsoftnet(); 1242 pfr_setflags_ktables(&workq); 1243 if (flags & PFR_FLAG_ATOMIC) 1244 splx(s); 1245 } 1246 if (ndel != NULL) 1247 *ndel = xdel; 1248 return (0); 1249} 1250 1251int 1252pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1253{ 1254 struct pfr_ktableworkq addq, changeq; 1255 struct pfr_ktable *p, *q, *r, key; 1256 int i, rv, s, xadd = 0; 1257#ifdef __FreeBSD__
| 1197 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1198 continue; 1199 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1200 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1201 xdel++; 1202 } 1203 if (!(flags & PFR_FLAG_DUMMY)) { 1204 if (flags & PFR_FLAG_ATOMIC) 1205 s = splsoftnet(); 1206 pfr_setflags_ktables(&workq); 1207 if (flags & PFR_FLAG_ATOMIC) 1208 splx(s); 1209 } 1210 if (ndel != NULL) 1211 *ndel = xdel; 1212 return (0); 1213} 1214 1215int 1216pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1217{ 1218 struct pfr_ktableworkq addq, changeq; 1219 struct pfr_ktable *p, *q, *r, key; 1220 int i, rv, s, xadd = 0; 1221#ifdef __FreeBSD__
|
1258 int ec;
| |
1259 /* 1260 * XXX Is it OK under LP64 environments? 1261 */ 1262 long tzero = (long)time_second; 1263#else 1264 long tzero = time.tv_sec; 1265#endif 1266 1267 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1268 SLIST_INIT(&addq); 1269 SLIST_INIT(&changeq); 1270 for (i = 0; i < size; i++) {
| 1222 /* 1223 * XXX Is it OK under LP64 environments? 1224 */ 1225 long tzero = (long)time_second; 1226#else 1227 long tzero = time.tv_sec; 1228#endif 1229 1230 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1231 SLIST_INIT(&addq); 1232 SLIST_INIT(&changeq); 1233 for (i = 0; i < size; i++) {
|
1271#ifdef __FreeBSD__ 1272 PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec); 1273 if (ec)
| 1234 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
|
1274 senderr(EFAULT);
| 1235 senderr(EFAULT);
|
1275#else 1276 if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t))) 1277 senderr(EFAULT); 1278#endif 1279 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK))
| 1236 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1237 flags & PFR_FLAG_USERIOCTL))
|
1280 senderr(EINVAL); 1281 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1282 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1283 if (p == NULL) { 1284 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1285 if (p == NULL) 1286 senderr(ENOMEM); 1287 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1288 if (!pfr_ktable_compare(p, q)) 1289 goto _skip; 1290 } 1291 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1292 xadd++; 1293 if (!key.pfrkt_anchor[0]) 1294 goto _skip; 1295 1296 /* find or create root table */ 1297 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1298 bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset)); 1299 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1300 if (r != NULL) { 1301 p->pfrkt_root = r; 1302 goto _skip; 1303 } 1304 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1305 if (!pfr_ktable_compare(&key, q)) { 1306 p->pfrkt_root = q; 1307 goto _skip; 1308 } 1309 } 1310 key.pfrkt_flags = 0; 1311 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1312 if (r == NULL) 1313 senderr(ENOMEM); 1314 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1315 p->pfrkt_root = r; 1316 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1317 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1318 if (!pfr_ktable_compare(&key, q)) 1319 goto _skip; 1320 p->pfrkt_nflags = (p->pfrkt_flags & 1321 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1322 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1323 xadd++; 1324 } 1325_skip: 1326 ; 1327 } 1328 if (!(flags & PFR_FLAG_DUMMY)) { 1329 if (flags & PFR_FLAG_ATOMIC) 1330 s = splsoftnet(); 1331 pfr_insert_ktables(&addq); 1332 pfr_setflags_ktables(&changeq); 1333 if (flags & PFR_FLAG_ATOMIC) 1334 splx(s); 1335 } else 1336 pfr_destroy_ktables(&addq, 0); 1337 if (nadd != NULL) 1338 *nadd = xadd; 1339 return (0); 1340_bad: 1341 pfr_destroy_ktables(&addq, 0); 1342 return (rv); 1343} 1344 1345int 1346pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1347{ 1348 struct pfr_ktableworkq workq; 1349 struct pfr_ktable *p, *q, key; 1350 int i, s, xdel = 0;
| 1238 senderr(EINVAL); 1239 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1240 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1241 if (p == NULL) { 1242 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1243 if (p == NULL) 1244 senderr(ENOMEM); 1245 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1246 if (!pfr_ktable_compare(p, q)) 1247 goto _skip; 1248 } 1249 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1250 xadd++; 1251 if (!key.pfrkt_anchor[0]) 1252 goto _skip; 1253 1254 /* find or create root table */ 1255 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1256 bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset)); 1257 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1258 if (r != NULL) { 1259 p->pfrkt_root = r; 1260 goto _skip; 1261 } 1262 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1263 if (!pfr_ktable_compare(&key, q)) { 1264 p->pfrkt_root = q; 1265 goto _skip; 1266 } 1267 } 1268 key.pfrkt_flags = 0; 1269 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1270 if (r == NULL) 1271 senderr(ENOMEM); 1272 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1273 p->pfrkt_root = r; 1274 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1275 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1276 if (!pfr_ktable_compare(&key, q)) 1277 goto _skip; 1278 p->pfrkt_nflags = (p->pfrkt_flags & 1279 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1280 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1281 xadd++; 1282 } 1283_skip: 1284 ; 1285 } 1286 if (!(flags & PFR_FLAG_DUMMY)) { 1287 if (flags & PFR_FLAG_ATOMIC) 1288 s = splsoftnet(); 1289 pfr_insert_ktables(&addq); 1290 pfr_setflags_ktables(&changeq); 1291 if (flags & PFR_FLAG_ATOMIC) 1292 splx(s); 1293 } else 1294 pfr_destroy_ktables(&addq, 0); 1295 if (nadd != NULL) 1296 *nadd = xadd; 1297 return (0); 1298_bad: 1299 pfr_destroy_ktables(&addq, 0); 1300 return (rv); 1301} 1302 1303int 1304pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1305{ 1306 struct pfr_ktableworkq workq; 1307 struct pfr_ktable *p, *q, key; 1308 int i, s, xdel = 0;
|
1351#ifdef __FreeBSD__ 1352 int ec; 1353#endif
| |
1354 1355 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1356 SLIST_INIT(&workq); 1357 for (i = 0; i < size; i++) {
| 1309 1310 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1311 SLIST_INIT(&workq); 1312 for (i = 0; i < size; i++) {
|
1358#ifdef __FreeBSD__ 1359 PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec); 1360 if (ec)
| 1313 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
|
1361 return (EFAULT);
| 1314 return (EFAULT);
|
1362#else 1363 if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t))) 1364 return (EFAULT); 1365#endif 1366 if (pfr_validate_table(&key.pfrkt_t, 0))
| 1315 if (pfr_validate_table(&key.pfrkt_t, 0, 1316 flags & PFR_FLAG_USERIOCTL))
|
1367 return (EINVAL); 1368 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1369 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1370 SLIST_FOREACH(q, &workq, pfrkt_workq) 1371 if (!pfr_ktable_compare(p, q)) 1372 goto _skip; 1373 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1374 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1375 xdel++; 1376 } 1377_skip: 1378 ; 1379 } 1380 1381 if (!(flags & PFR_FLAG_DUMMY)) { 1382 if (flags & PFR_FLAG_ATOMIC) 1383 s = splsoftnet(); 1384 pfr_setflags_ktables(&workq); 1385 if (flags & PFR_FLAG_ATOMIC) 1386 splx(s); 1387 } 1388 if (ndel != NULL) 1389 *ndel = xdel; 1390 return (0); 1391} 1392 1393int 1394pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1395 int flags) 1396{ 1397 struct pfr_ktable *p; 1398 int n, nn;
| 1317 return (EINVAL); 1318 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1319 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1320 SLIST_FOREACH(q, &workq, pfrkt_workq) 1321 if (!pfr_ktable_compare(p, q)) 1322 goto _skip; 1323 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1324 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1325 xdel++; 1326 } 1327_skip: 1328 ; 1329 } 1330 1331 if (!(flags & PFR_FLAG_DUMMY)) { 1332 if (flags & PFR_FLAG_ATOMIC) 1333 s = splsoftnet(); 1334 pfr_setflags_ktables(&workq); 1335 if (flags & PFR_FLAG_ATOMIC) 1336 splx(s); 1337 } 1338 if (ndel != NULL) 1339 *ndel = xdel; 1340 return (0); 1341} 1342 1343int 1344pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1345 int flags) 1346{ 1347 struct pfr_ktable *p; 1348 int n, nn;
|
1399#ifdef __FreeBSD__ 1400 int ec; 1401#endif
| |
1402 1403 ACCEPT_FLAGS(PFR_FLAG_ALLRSETS); 1404 n = nn = pfr_table_count(filter, flags); 1405 if (n < 0) 1406 return (ENOENT); 1407 if (n > *size) { 1408 *size = n; 1409 return (0); 1410 } 1411 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1412 if (pfr_skip_table(filter, p, flags)) 1413 continue; 1414 if (n-- <= 0) 1415 continue;
| 1349 1350 ACCEPT_FLAGS(PFR_FLAG_ALLRSETS); 1351 n = nn = pfr_table_count(filter, flags); 1352 if (n < 0) 1353 return (ENOENT); 1354 if (n > *size) { 1355 *size = n; 1356 return (0); 1357 } 1358 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1359 if (pfr_skip_table(filter, p, flags)) 1360 continue; 1361 if (n-- <= 0) 1362 continue;
|
1416#ifdef __FreeBSD__ 1417 PF_COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), ec); 1418 if (ec)
| 1363 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
|
1419 return (EFAULT);
| 1364 return (EFAULT);
|
1420#else 1421 if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl))) 1422 return (EFAULT); 1423#endif
| |
1424 } 1425 if (n) { 1426 printf("pfr_get_tables: corruption detected (%d).\n", n); 1427 return (ENOTTY); 1428 } 1429 *size = nn; 1430 return (0); 1431} 1432 1433int 1434pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1435 int flags) 1436{ 1437 struct pfr_ktable *p; 1438 struct pfr_ktableworkq workq; 1439 int s, n, nn; 1440#ifdef __FreeBSD__
| 1365 } 1366 if (n) { 1367 printf("pfr_get_tables: corruption detected (%d).\n", n); 1368 return (ENOTTY); 1369 } 1370 *size = nn; 1371 return (0); 1372} 1373 1374int 1375pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1376 int flags) 1377{ 1378 struct pfr_ktable *p; 1379 struct pfr_ktableworkq workq; 1380 int s, n, nn; 1381#ifdef __FreeBSD__
|
1441 int ec;
| |
1442 /* 1443 * XXX Is it OK under LP64 environments? 1444 */ 1445 long tzero = (long)time_second; 1446#else 1447 long tzero = time.tv_sec; 1448#endif 1449 1450 ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS); 1451 /* XXX PFR_FLAG_CLSTATS disabled */ 1452 n = nn = pfr_table_count(filter, flags); 1453 if (n < 0) 1454 return (ENOENT); 1455 if (n > *size) { 1456 *size = n; 1457 return (0); 1458 } 1459 SLIST_INIT(&workq); 1460 if (flags & PFR_FLAG_ATOMIC) 1461 s = splsoftnet(); 1462 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1463 if (pfr_skip_table(filter, p, flags)) 1464 continue; 1465 if (n-- <= 0) 1466 continue; 1467 if (!(flags & PFR_FLAG_ATOMIC)) 1468 s = splsoftnet();
| 1382 /* 1383 * XXX Is it OK under LP64 environments? 1384 */ 1385 long tzero = (long)time_second; 1386#else 1387 long tzero = time.tv_sec; 1388#endif 1389 1390 ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS); 1391 /* XXX PFR_FLAG_CLSTATS disabled */ 1392 n = nn = pfr_table_count(filter, flags); 1393 if (n < 0) 1394 return (ENOENT); 1395 if (n > *size) { 1396 *size = n; 1397 return (0); 1398 } 1399 SLIST_INIT(&workq); 1400 if (flags & PFR_FLAG_ATOMIC) 1401 s = splsoftnet(); 1402 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1403 if (pfr_skip_table(filter, p, flags)) 1404 continue; 1405 if (n-- <= 0) 1406 continue; 1407 if (!(flags & PFR_FLAG_ATOMIC)) 1408 s = splsoftnet();
|
1469#ifdef __FreeBSD__ 1470 PF_COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), ec); 1471 if (ec) {
| 1409 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
|
1472 splx(s); 1473 return (EFAULT); 1474 }
| 1410 splx(s); 1411 return (EFAULT); 1412 }
|
1475#else 1476 if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) { 1477 splx(s); 1478 return (EFAULT); 1479 } 1480#endif
| |
1481 if (!(flags & PFR_FLAG_ATOMIC)) 1482 splx(s); 1483 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1484 } 1485 if (flags & PFR_FLAG_CLSTATS) 1486 pfr_clstats_ktables(&workq, tzero, 1487 flags & PFR_FLAG_ADDRSTOO); 1488 if (flags & PFR_FLAG_ATOMIC) 1489 splx(s); 1490 if (n) { 1491 printf("pfr_get_tstats: corruption detected (%d).\n", n); 1492 return (ENOTTY); 1493 } 1494 *size = nn; 1495 return (0); 1496} 1497 1498int 1499pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1500{ 1501 struct pfr_ktableworkq workq; 1502 struct pfr_ktable *p, key; 1503 int i, s, xzero = 0; 1504#ifdef __FreeBSD__
| 1413 if (!(flags & PFR_FLAG_ATOMIC)) 1414 splx(s); 1415 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1416 } 1417 if (flags & PFR_FLAG_CLSTATS) 1418 pfr_clstats_ktables(&workq, tzero, 1419 flags & PFR_FLAG_ADDRSTOO); 1420 if (flags & PFR_FLAG_ATOMIC) 1421 splx(s); 1422 if (n) { 1423 printf("pfr_get_tstats: corruption detected (%d).\n", n); 1424 return (ENOTTY); 1425 } 1426 *size = nn; 1427 return (0); 1428} 1429 1430int 1431pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1432{ 1433 struct pfr_ktableworkq workq; 1434 struct pfr_ktable *p, key; 1435 int i, s, xzero = 0; 1436#ifdef __FreeBSD__
|
1505 int ec;
| |
1506 /* 1507 * XXX Is it OK under LP64 environments? 1508 */ 1509 long tzero = (long)time_second; 1510#else 1511 long tzero = time.tv_sec; 1512#endif 1513 1514 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO); 1515 SLIST_INIT(&workq); 1516 for (i = 0; i < size; i++) {
| 1437 /* 1438 * XXX Is it OK under LP64 environments? 1439 */ 1440 long tzero = (long)time_second; 1441#else 1442 long tzero = time.tv_sec; 1443#endif 1444 1445 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO); 1446 SLIST_INIT(&workq); 1447 for (i = 0; i < size; i++) {
|
1517#ifdef __FreeBSD__ 1518 PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec); 1519 if (ec)
| 1448 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
|
1520 return (EFAULT);
| 1449 return (EFAULT);
|
1521#else 1522 if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t))) 1523 return (EFAULT); 1524#endif 1525 if (pfr_validate_table(&key.pfrkt_t, 0))
| 1450 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
|
1526 return (EINVAL); 1527 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1528 if (p != NULL) { 1529 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1530 xzero++; 1531 } 1532 } 1533 if (!(flags & PFR_FLAG_DUMMY)) { 1534 if (flags & PFR_FLAG_ATOMIC) 1535 s = splsoftnet(); 1536 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1537 if (flags & PFR_FLAG_ATOMIC) 1538 splx(s); 1539 } 1540 if (nzero != NULL) 1541 *nzero = xzero; 1542 return (0); 1543} 1544 1545int 1546pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1547 int *nchange, int *ndel, int flags) 1548{ 1549 struct pfr_ktableworkq workq; 1550 struct pfr_ktable *p, *q, key; 1551 int i, s, xchange = 0, xdel = 0;
| 1451 return (EINVAL); 1452 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1453 if (p != NULL) { 1454 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1455 xzero++; 1456 } 1457 } 1458 if (!(flags & PFR_FLAG_DUMMY)) { 1459 if (flags & PFR_FLAG_ATOMIC) 1460 s = splsoftnet(); 1461 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1462 if (flags & PFR_FLAG_ATOMIC) 1463 splx(s); 1464 } 1465 if (nzero != NULL) 1466 *nzero = xzero; 1467 return (0); 1468} 1469 1470int 1471pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1472 int *nchange, int *ndel, int flags) 1473{ 1474 struct pfr_ktableworkq workq; 1475 struct pfr_ktable *p, *q, key; 1476 int i, s, xchange = 0, xdel = 0;
|
1552#ifdef __FreeBSD__ 1553 int ec; 1554#endif
| |
1555 1556 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1557 if ((setflag & ~PFR_TFLAG_USRMASK) || 1558 (clrflag & ~PFR_TFLAG_USRMASK) || 1559 (setflag & clrflag)) 1560 return (EINVAL); 1561 SLIST_INIT(&workq); 1562 for (i = 0; i < size; i++) {
| 1477 1478 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1479 if ((setflag & ~PFR_TFLAG_USRMASK) || 1480 (clrflag & ~PFR_TFLAG_USRMASK) || 1481 (setflag & clrflag)) 1482 return (EINVAL); 1483 SLIST_INIT(&workq); 1484 for (i = 0; i < size; i++) {
|
1563#ifdef __FreeBSD__ 1564 PF_COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), ec); 1565 if (ec)
| 1485 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
|
1566 return (EFAULT);
| 1486 return (EFAULT);
|
1567#else 1568 if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t))) 1569 return (EFAULT); 1570#endif 1571 if (pfr_validate_table(&key.pfrkt_t, 0))
| 1487 if (pfr_validate_table(&key.pfrkt_t, 0, 1488 flags & PFR_FLAG_USERIOCTL))
|
1572 return (EINVAL); 1573 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1574 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1575 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1576 ~clrflag; 1577 if (p->pfrkt_nflags == p->pfrkt_flags) 1578 goto _skip; 1579 SLIST_FOREACH(q, &workq, pfrkt_workq) 1580 if (!pfr_ktable_compare(p, q)) 1581 goto _skip; 1582 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1583 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1584 (clrflag & PFR_TFLAG_PERSIST) && 1585 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1586 xdel++; 1587 else 1588 xchange++; 1589 } 1590_skip: 1591 ; 1592 } 1593 if (!(flags & PFR_FLAG_DUMMY)) { 1594 if (flags & PFR_FLAG_ATOMIC) 1595 s = splsoftnet(); 1596 pfr_setflags_ktables(&workq); 1597 if (flags & PFR_FLAG_ATOMIC) 1598 splx(s); 1599 } 1600 if (nchange != NULL) 1601 *nchange = xchange; 1602 if (ndel != NULL) 1603 *ndel = xdel; 1604 return (0); 1605} 1606 1607int 1608pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1609{ 1610 struct pfr_ktableworkq workq; 1611 struct pfr_ktable *p; 1612 struct pf_ruleset *rs; 1613 int xdel = 0; 1614 1615 ACCEPT_FLAGS(PFR_FLAG_DUMMY); 1616 rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset); 1617 if (rs == NULL) 1618 return (ENOMEM); 1619 SLIST_INIT(&workq); 1620 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1621 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1622 pfr_skip_table(trs, p, 0)) 1623 continue; 1624 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1625 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1626 xdel++; 1627 } 1628 if (!(flags & PFR_FLAG_DUMMY)) { 1629 pfr_setflags_ktables(&workq); 1630 if (ticket != NULL) 1631 *ticket = ++rs->tticket; 1632 rs->topen = 1; 1633 } else 1634 pf_remove_if_empty_ruleset(rs); 1635 if (ndel != NULL) 1636 *ndel = xdel; 1637 return (0); 1638} 1639 1640int 1641pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1642 int *nadd, int *naddr, u_int32_t ticket, int flags) 1643{ 
1644 struct pfr_ktableworkq tableq; 1645 struct pfr_kentryworkq addrq; 1646 struct pfr_ktable *kt, *rt, *shadow, key; 1647 struct pfr_kentry *p; 1648 struct pfr_addr ad; 1649 struct pf_ruleset *rs; 1650 int i, rv, xadd = 0, xaddr = 0;
| 1489 return (EINVAL); 1490 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1491 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1492 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1493 ~clrflag; 1494 if (p->pfrkt_nflags == p->pfrkt_flags) 1495 goto _skip; 1496 SLIST_FOREACH(q, &workq, pfrkt_workq) 1497 if (!pfr_ktable_compare(p, q)) 1498 goto _skip; 1499 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1500 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1501 (clrflag & PFR_TFLAG_PERSIST) && 1502 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1503 xdel++; 1504 else 1505 xchange++; 1506 } 1507_skip: 1508 ; 1509 } 1510 if (!(flags & PFR_FLAG_DUMMY)) { 1511 if (flags & PFR_FLAG_ATOMIC) 1512 s = splsoftnet(); 1513 pfr_setflags_ktables(&workq); 1514 if (flags & PFR_FLAG_ATOMIC) 1515 splx(s); 1516 } 1517 if (nchange != NULL) 1518 *nchange = xchange; 1519 if (ndel != NULL) 1520 *ndel = xdel; 1521 return (0); 1522} 1523 1524int 1525pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1526{ 1527 struct pfr_ktableworkq workq; 1528 struct pfr_ktable *p; 1529 struct pf_ruleset *rs; 1530 int xdel = 0; 1531 1532 ACCEPT_FLAGS(PFR_FLAG_DUMMY); 1533 rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset); 1534 if (rs == NULL) 1535 return (ENOMEM); 1536 SLIST_INIT(&workq); 1537 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1538 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1539 pfr_skip_table(trs, p, 0)) 1540 continue; 1541 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1542 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1543 xdel++; 1544 } 1545 if (!(flags & PFR_FLAG_DUMMY)) { 1546 pfr_setflags_ktables(&workq); 1547 if (ticket != NULL) 1548 *ticket = ++rs->tticket; 1549 rs->topen = 1; 1550 } else 1551 pf_remove_if_empty_ruleset(rs); 1552 if (ndel != NULL) 1553 *ndel = xdel; 1554 return (0); 1555} 1556 1557int 1558pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1559 int *nadd, int *naddr, u_int32_t ticket, int flags) 1560{ 
1561 struct pfr_ktableworkq tableq; 1562 struct pfr_kentryworkq addrq; 1563 struct pfr_ktable *kt, *rt, *shadow, key; 1564 struct pfr_kentry *p; 1565 struct pfr_addr ad; 1566 struct pf_ruleset *rs; 1567 int i, rv, xadd = 0, xaddr = 0;
|
1651#ifdef __FreeBSD__ 1652 int ec; 1653#endif
| |
1654 1655 ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO); 1656 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1657 return (EINVAL);
| 1568 1569 ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO); 1570 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1571 return (EINVAL);
|
1658 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK))
| 1572 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1573 flags & PFR_FLAG_USERIOCTL))
|
1659 return (EINVAL); 1660 rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset); 1661 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1662 return (EBUSY); 1663 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1664 SLIST_INIT(&tableq); 1665 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl); 1666 if (kt == NULL) { 1667 kt = pfr_create_ktable(tbl, 0, 1); 1668 if (kt == NULL) 1669 return (ENOMEM); 1670 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1671 xadd++; 1672 if (!tbl->pfrt_anchor[0]) 1673 goto _skip; 1674 1675 /* find or create root table */ 1676 bzero(&key, sizeof(key)); 1677 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1678 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1679 if (rt != NULL) { 1680 kt->pfrkt_root = rt; 1681 goto _skip; 1682 } 1683 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1684 if (rt == NULL) { 1685 pfr_destroy_ktables(&tableq, 0); 1686 return (ENOMEM); 1687 } 1688 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1689 kt->pfrkt_root = rt; 1690 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1691 xadd++; 1692_skip: 1693 shadow = pfr_create_ktable(tbl, 0, 0); 1694 if (shadow == NULL) { 1695 pfr_destroy_ktables(&tableq, 0); 1696 return (ENOMEM); 1697 } 1698 SLIST_INIT(&addrq); 1699 for (i = 0; i < size; i++) {
| 1574 return (EINVAL); 1575 rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset); 1576 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1577 return (EBUSY); 1578 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1579 SLIST_INIT(&tableq); 1580 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl); 1581 if (kt == NULL) { 1582 kt = pfr_create_ktable(tbl, 0, 1); 1583 if (kt == NULL) 1584 return (ENOMEM); 1585 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1586 xadd++; 1587 if (!tbl->pfrt_anchor[0]) 1588 goto _skip; 1589 1590 /* find or create root table */ 1591 bzero(&key, sizeof(key)); 1592 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1593 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); 1594 if (rt != NULL) { 1595 kt->pfrkt_root = rt; 1596 goto _skip; 1597 } 1598 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1599 if (rt == NULL) { 1600 pfr_destroy_ktables(&tableq, 0); 1601 return (ENOMEM); 1602 } 1603 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1604 kt->pfrkt_root = rt; 1605 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1606 xadd++; 1607_skip: 1608 shadow = pfr_create_ktable(tbl, 0, 0); 1609 if (shadow == NULL) { 1610 pfr_destroy_ktables(&tableq, 0); 1611 return (ENOMEM); 1612 } 1613 SLIST_INIT(&addrq); 1614 for (i = 0; i < size; i++) {
|
1700#ifdef __FreeBSD__ 1701 PF_COPYIN(addr+i, &ad, sizeof(ad), ec); 1702 if (ec)
| 1615 if (COPYIN(addr+i, &ad, sizeof(ad)))
|
1703 senderr(EFAULT);
| 1616 senderr(EFAULT);
|
1704#else 1705 if (copyin(addr+i, &ad, sizeof(ad))) 1706 senderr(EFAULT); 1707#endif
| |
1708 if (pfr_validate_addr(&ad)) 1709 senderr(EINVAL); 1710 if (pfr_lookup_addr(shadow, &ad, 1) != NULL) 1711 continue; 1712 p = pfr_create_kentry(&ad); 1713 if (p == NULL) 1714 senderr(ENOMEM); 1715 if (pfr_route_kentry(shadow, p)) { 1716 pfr_destroy_kentry(p); 1717 continue; 1718 } 1719 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1720 xaddr++; 1721 } 1722 if (!(flags & PFR_FLAG_DUMMY)) { 1723 if (kt->pfrkt_shadow != NULL) 1724 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1725 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1726 pfr_insert_ktables(&tableq); 1727 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1728 xaddr : NO_ADDRESSES; 1729 kt->pfrkt_shadow = shadow; 1730 } else { 1731 pfr_clean_node_mask(shadow, &addrq); 1732 pfr_destroy_ktable(shadow, 0); 1733 pfr_destroy_ktables(&tableq, 0); 1734 pfr_destroy_kentries(&addrq); 1735 } 1736 if (nadd != NULL) 1737 *nadd = xadd; 1738 if (naddr != NULL) 1739 *naddr = xaddr; 1740 return (0); 1741_bad: 1742 pfr_destroy_ktable(shadow, 0); 1743 pfr_destroy_ktables(&tableq, 0); 1744 pfr_destroy_kentries(&addrq); 1745 return (rv); 1746} 1747 1748int
| 1617 if (pfr_validate_addr(&ad)) 1618 senderr(EINVAL); 1619 if (pfr_lookup_addr(shadow, &ad, 1) != NULL) 1620 continue; 1621 p = pfr_create_kentry(&ad); 1622 if (p == NULL) 1623 senderr(ENOMEM); 1624 if (pfr_route_kentry(shadow, p)) { 1625 pfr_destroy_kentry(p); 1626 continue; 1627 } 1628 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1629 xaddr++; 1630 } 1631 if (!(flags & PFR_FLAG_DUMMY)) { 1632 if (kt->pfrkt_shadow != NULL) 1633 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1634 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1635 pfr_insert_ktables(&tableq); 1636 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1637 xaddr : NO_ADDRESSES; 1638 kt->pfrkt_shadow = shadow; 1639 } else { 1640 pfr_clean_node_mask(shadow, &addrq); 1641 pfr_destroy_ktable(shadow, 0); 1642 pfr_destroy_ktables(&tableq, 0); 1643 pfr_destroy_kentries(&addrq); 1644 } 1645 if (nadd != NULL) 1646 *nadd = xadd; 1647 if (naddr != NULL) 1648 *naddr = xaddr; 1649 return (0); 1650_bad: 1651 pfr_destroy_ktable(shadow, 0); 1652 pfr_destroy_ktables(&tableq, 0); 1653 pfr_destroy_kentries(&addrq); 1654 return (rv); 1655} 1656 1657int
|
| 1658pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1659{ 1660 struct pfr_ktableworkq workq; 1661 struct pfr_ktable *p; 1662 struct pf_ruleset *rs; 1663 int xdel = 0; 1664 1665 ACCEPT_FLAGS(PFR_FLAG_DUMMY); 1666 rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset); 1667 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1668 return (0); 1669 SLIST_INIT(&workq); 1670 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1671 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1672 pfr_skip_table(trs, p, 0)) 1673 continue; 1674 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1675 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1676 xdel++; 1677 } 1678 if (!(flags & PFR_FLAG_DUMMY)) { 1679 pfr_setflags_ktables(&workq); 1680 rs->topen = 0; 1681 pf_remove_if_empty_ruleset(rs); 1682 } 1683 if (ndel != NULL) 1684 *ndel = xdel; 1685 return (0); 1686} 1687 1688int
|
1749pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1750 int *nchange, int flags) 1751{ 1752 struct pfr_ktable *p; 1753 struct pfr_ktableworkq workq; 1754 struct pf_ruleset *rs; 1755 int s, xadd = 0, xchange = 0; 1756#ifdef __FreeBSD__ 1757 /* 1758 * XXX Is it OK under LP64 environments? 1759 */ 1760 long tzero = (long)time_second; 1761#else 1762 long tzero = time.tv_sec; 1763#endif 1764 1765 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1766 rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset); 1767 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1768 return (EBUSY); 1769 1770 SLIST_INIT(&workq); 1771 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1772 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1773 pfr_skip_table(trs, p, 0)) 1774 continue; 1775 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1776 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1777 xchange++; 1778 else 1779 xadd++; 1780 } 1781 1782 if (!(flags & PFR_FLAG_DUMMY)) { 1783 if (flags & PFR_FLAG_ATOMIC) 1784 s = splsoftnet(); 1785 SLIST_FOREACH(p, &workq, pfrkt_workq) 1786 pfr_commit_ktable(p, tzero); 1787 if (flags & PFR_FLAG_ATOMIC) 1788 splx(s); 1789 rs->topen = 0; 1790 pf_remove_if_empty_ruleset(rs); 1791 } 1792 if (nadd != NULL) 1793 *nadd = xadd; 1794 if (nchange != NULL) 1795 *nchange = xchange; 1796 1797 return (0); 1798} 1799 1800void 1801pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1802{ 1803 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1804 int nflags; 1805 1806 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1807 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1808 pfr_clstats_ktable(kt, tzero, 1); 1809 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1810 /* kt might contain addresses */ 1811 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1812 struct pfr_kentry *p, *q, *next; 1813 struct pfr_addr ad; 1814 1815 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1816 pfr_mark_addrs(kt); 1817 SLIST_INIT(&addq); 1818 SLIST_INIT(&changeq); 1819 SLIST_INIT(&delq); 1820 
SLIST_INIT(&garbageq); 1821 pfr_clean_node_mask(shadow, &addrq); 1822 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) { 1823 next = SLIST_NEXT(p, pfrke_workq); /* XXX */ 1824 pfr_copyout_addr(&ad, p); 1825 q = pfr_lookup_addr(kt, &ad, 1); 1826 if (q != NULL) { 1827 if (q->pfrke_not != p->pfrke_not) 1828 SLIST_INSERT_HEAD(&changeq, q, 1829 pfrke_workq); 1830 q->pfrke_mark = 1; 1831 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1832 } else { 1833 p->pfrke_tzero = tzero; 1834 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1835 } 1836 } 1837 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1838 pfr_insert_kentries(kt, &addq, tzero); 1839 pfr_remove_kentries(kt, &delq); 1840 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 1841 pfr_destroy_kentries(&garbageq); 1842 } else { 1843 /* kt cannot contain addresses */ 1844 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1845 shadow->pfrkt_ip4); 1846 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1847 shadow->pfrkt_ip6); 1848 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1849 pfr_clstats_ktable(kt, tzero, 1); 1850 } 1851 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1852 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1853 & ~PFR_TFLAG_INACTIVE; 1854 pfr_destroy_ktable(shadow, 0); 1855 kt->pfrkt_shadow = NULL; 1856 pfr_setflags_ktable(kt, nflags); 1857} 1858 1859int
| 1689pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1690 int *nchange, int flags) 1691{ 1692 struct pfr_ktable *p; 1693 struct pfr_ktableworkq workq; 1694 struct pf_ruleset *rs; 1695 int s, xadd = 0, xchange = 0; 1696#ifdef __FreeBSD__ 1697 /* 1698 * XXX Is it OK under LP64 environments? 1699 */ 1700 long tzero = (long)time_second; 1701#else 1702 long tzero = time.tv_sec; 1703#endif 1704 1705 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY); 1706 rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset); 1707 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1708 return (EBUSY); 1709 1710 SLIST_INIT(&workq); 1711 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { 1712 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1713 pfr_skip_table(trs, p, 0)) 1714 continue; 1715 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1716 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1717 xchange++; 1718 else 1719 xadd++; 1720 } 1721 1722 if (!(flags & PFR_FLAG_DUMMY)) { 1723 if (flags & PFR_FLAG_ATOMIC) 1724 s = splsoftnet(); 1725 SLIST_FOREACH(p, &workq, pfrkt_workq) 1726 pfr_commit_ktable(p, tzero); 1727 if (flags & PFR_FLAG_ATOMIC) 1728 splx(s); 1729 rs->topen = 0; 1730 pf_remove_if_empty_ruleset(rs); 1731 } 1732 if (nadd != NULL) 1733 *nadd = xadd; 1734 if (nchange != NULL) 1735 *nchange = xchange; 1736 1737 return (0); 1738} 1739 1740void 1741pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1742{ 1743 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1744 int nflags; 1745 1746 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1747 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1748 pfr_clstats_ktable(kt, tzero, 1); 1749 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1750 /* kt might contain addresses */ 1751 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1752 struct pfr_kentry *p, *q, *next; 1753 struct pfr_addr ad; 1754 1755 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1756 pfr_mark_addrs(kt); 1757 SLIST_INIT(&addq); 1758 SLIST_INIT(&changeq); 1759 SLIST_INIT(&delq); 1760 
SLIST_INIT(&garbageq); 1761 pfr_clean_node_mask(shadow, &addrq); 1762 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) { 1763 next = SLIST_NEXT(p, pfrke_workq); /* XXX */ 1764 pfr_copyout_addr(&ad, p); 1765 q = pfr_lookup_addr(kt, &ad, 1); 1766 if (q != NULL) { 1767 if (q->pfrke_not != p->pfrke_not) 1768 SLIST_INSERT_HEAD(&changeq, q, 1769 pfrke_workq); 1770 q->pfrke_mark = 1; 1771 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1772 } else { 1773 p->pfrke_tzero = tzero; 1774 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1775 } 1776 } 1777 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1778 pfr_insert_kentries(kt, &addq, tzero); 1779 pfr_remove_kentries(kt, &delq); 1780 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 1781 pfr_destroy_kentries(&garbageq); 1782 } else { 1783 /* kt cannot contain addresses */ 1784 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1785 shadow->pfrkt_ip4); 1786 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1787 shadow->pfrkt_ip6); 1788 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1789 pfr_clstats_ktable(kt, tzero, 1); 1790 } 1791 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1792 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1793 & ~PFR_TFLAG_INACTIVE; 1794 pfr_destroy_ktable(shadow, 0); 1795 kt->pfrkt_shadow = NULL; 1796 pfr_setflags_ktable(kt, nflags); 1797} 1798 1799int
|
1860pfr_validate_table(struct pfr_table *tbl, int allowedflags)
| 1800pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
|
1861{ 1862 int i; 1863 1864 if (!tbl->pfrt_name[0]) 1865 return (-1);
| 1801{ 1802 int i; 1803 1804 if (!tbl->pfrt_name[0]) 1805 return (-1);
|
| 1806 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1807 return (-1);
|
1866 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1867 return (-1); 1868 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1869 if (tbl->pfrt_name[i]) 1870 return (-1); 1871 if (tbl->pfrt_flags & ~allowedflags) 1872 return (-1); 1873 return (0); 1874} 1875 1876int 1877pfr_table_count(struct pfr_table *filter, int flags) 1878{ 1879 struct pf_ruleset *rs; 1880 struct pf_anchor *ac; 1881 1882 if (flags & PFR_FLAG_ALLRSETS) 1883 return (pfr_ktable_cnt); 1884 if (filter->pfrt_ruleset[0]) { 1885 rs = pf_find_ruleset(filter->pfrt_anchor, 1886 filter->pfrt_ruleset); 1887 return ((rs != NULL) ? rs->tables : -1); 1888 } 1889 if (filter->pfrt_anchor[0]) { 1890 ac = pf_find_anchor(filter->pfrt_anchor); 1891 return ((ac != NULL) ? ac->tables : -1); 1892 } 1893 return (pf_main_ruleset.tables); 1894} 1895 1896int 1897pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1898{ 1899 if (flags & PFR_FLAG_ALLRSETS) 1900 return (0); 1901 if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor, 1902 PF_ANCHOR_NAME_SIZE)) 1903 return (1); 1904 if (!filter->pfrt_ruleset[0]) 1905 return (0); 1906 if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset, 1907 PF_RULESET_NAME_SIZE)) 1908 return (1); 1909 return (0); 1910} 1911 1912void 1913pfr_insert_ktables(struct pfr_ktableworkq *workq) 1914{ 1915 struct pfr_ktable *p; 1916 1917 SLIST_FOREACH(p, workq, pfrkt_workq) 1918 pfr_insert_ktable(p); 1919} 1920 1921void 1922pfr_insert_ktable(struct pfr_ktable *kt) 1923{ 1924 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); 1925 pfr_ktable_cnt++; 1926 if (kt->pfrkt_root != NULL) 1927 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1928 pfr_setflags_ktable(kt->pfrkt_root, 1929 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1930} 1931 1932void 1933pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1934{ 1935 struct pfr_ktable *p; 1936 1937 SLIST_FOREACH(p, workq, pfrkt_workq) 1938 pfr_setflags_ktable(p, p->pfrkt_nflags); 1939} 1940 1941void 1942pfr_setflags_ktable(struct 
pfr_ktable *kt, int newf) 1943{ 1944 struct pfr_kentryworkq addrq; 1945 1946 if (!(newf & PFR_TFLAG_REFERENCED) && 1947 !(newf & PFR_TFLAG_PERSIST)) 1948 newf &= ~PFR_TFLAG_ACTIVE; 1949 if (!(newf & PFR_TFLAG_ACTIVE)) 1950 newf &= ~PFR_TFLAG_USRMASK; 1951 if (!(newf & PFR_TFLAG_SETMASK)) { 1952 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt); 1953 if (kt->pfrkt_root != NULL) 1954 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1955 pfr_setflags_ktable(kt->pfrkt_root, 1956 kt->pfrkt_root->pfrkt_flags & 1957 ~PFR_TFLAG_REFDANCHOR); 1958 pfr_destroy_ktable(kt, 1); 1959 pfr_ktable_cnt--; 1960 return; 1961 } 1962 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1963 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1964 pfr_remove_kentries(kt, &addrq); 1965 } 1966 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1967 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1968 kt->pfrkt_shadow = NULL; 1969 } 1970 kt->pfrkt_flags = newf; 1971} 1972 1973void 1974pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1975{ 1976 struct pfr_ktable *p; 1977 1978 SLIST_FOREACH(p, workq, pfrkt_workq) 1979 pfr_clstats_ktable(p, tzero, recurse); 1980} 1981 1982void 1983pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1984{ 1985 struct pfr_kentryworkq addrq; 1986 int s; 1987 1988 if (recurse) { 1989 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1990 pfr_clstats_kentries(&addrq, tzero, 0); 1991 } 1992 s = splsoftnet(); 1993 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets)); 1994 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes)); 1995 kt->pfrkt_match = kt->pfrkt_nomatch = 0; 1996 splx(s); 1997 kt->pfrkt_tzero = tzero; 1998} 1999 2000struct pfr_ktable * 2001pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 2002{ 2003 struct pfr_ktable *kt; 2004 struct pf_ruleset *rs; 2005 2006 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT); 2007 if (kt == NULL) 2008 return (NULL); 2009 bzero(kt, sizeof(*kt)); 2010 kt->pfrkt_t = *tbl; 2011 2012 if 
(attachruleset) { 2013 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor, 2014 tbl->pfrt_ruleset); 2015 if (!rs) { 2016 pfr_destroy_ktable(kt, 0); 2017 return (NULL); 2018 } 2019 kt->pfrkt_rs = rs; 2020 rs->tables++; 2021 if (rs->anchor != NULL) 2022 rs->anchor->tables++; 2023 } 2024 2025 if (!rn_inithead((void **)&kt->pfrkt_ip4, 2026 offsetof(struct sockaddr_in, sin_addr) * 8) || 2027 !rn_inithead((void **)&kt->pfrkt_ip6, 2028 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 2029 pfr_destroy_ktable(kt, 0); 2030 return (NULL); 2031 } 2032 kt->pfrkt_tzero = tzero; 2033 2034 return (kt); 2035} 2036 2037void 2038pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 2039{ 2040 struct pfr_ktable *p, *q; 2041 2042 for (p = SLIST_FIRST(workq); p; p = q) { 2043 q = SLIST_NEXT(p, pfrkt_workq); 2044 pfr_destroy_ktable(p, flushaddr); 2045 } 2046} 2047 2048void 2049pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 2050{ 2051 struct pfr_kentryworkq addrq; 2052 2053 if (flushaddr) { 2054 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 2055 pfr_clean_node_mask(kt, &addrq); 2056 pfr_destroy_kentries(&addrq); 2057 } 2058#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 2059 if (kt->pfrkt_ip4 != NULL) { 2060 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4); 2061 free((caddr_t)kt->pfrkt_ip4, M_RTABLE); 2062 } 2063 if (kt->pfrkt_ip6 != NULL) { 2064 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6); 2065 free((caddr_t)kt->pfrkt_ip6, M_RTABLE); 2066 } 2067#else 2068 if (kt->pfrkt_ip4 != NULL) 2069 free((caddr_t)kt->pfrkt_ip4, M_RTABLE); 2070 if (kt->pfrkt_ip6 != NULL) 2071 free((caddr_t)kt->pfrkt_ip6, M_RTABLE); 2072#endif 2073 if (kt->pfrkt_shadow != NULL) 2074 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 2075 if (kt->pfrkt_rs != NULL) { 2076 kt->pfrkt_rs->tables--; 2077 if (kt->pfrkt_rs->anchor != NULL) 2078 kt->pfrkt_rs->anchor->tables--; 2079 pf_remove_if_empty_ruleset(kt->pfrkt_rs); 2080 } 2081 pool_put(&pfr_ktable_pl, kt); 2082} 2083 2084int 2085pfr_ktable_compare(struct 
pfr_ktable *p, struct pfr_ktable *q) 2086{ 2087 int d; 2088 2089 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 2090 return (d); 2091 if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor, 2092 PF_ANCHOR_NAME_SIZE))) 2093 return (d);
| 1808 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1809 return (-1); 1810 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1811 if (tbl->pfrt_name[i]) 1812 return (-1); 1813 if (tbl->pfrt_flags & ~allowedflags) 1814 return (-1); 1815 return (0); 1816} 1817 1818int 1819pfr_table_count(struct pfr_table *filter, int flags) 1820{ 1821 struct pf_ruleset *rs; 1822 struct pf_anchor *ac; 1823 1824 if (flags & PFR_FLAG_ALLRSETS) 1825 return (pfr_ktable_cnt); 1826 if (filter->pfrt_ruleset[0]) { 1827 rs = pf_find_ruleset(filter->pfrt_anchor, 1828 filter->pfrt_ruleset); 1829 return ((rs != NULL) ? rs->tables : -1); 1830 } 1831 if (filter->pfrt_anchor[0]) { 1832 ac = pf_find_anchor(filter->pfrt_anchor); 1833 return ((ac != NULL) ? ac->tables : -1); 1834 } 1835 return (pf_main_ruleset.tables); 1836} 1837 1838int 1839pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1840{ 1841 if (flags & PFR_FLAG_ALLRSETS) 1842 return (0); 1843 if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor, 1844 PF_ANCHOR_NAME_SIZE)) 1845 return (1); 1846 if (!filter->pfrt_ruleset[0]) 1847 return (0); 1848 if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset, 1849 PF_RULESET_NAME_SIZE)) 1850 return (1); 1851 return (0); 1852} 1853 1854void 1855pfr_insert_ktables(struct pfr_ktableworkq *workq) 1856{ 1857 struct pfr_ktable *p; 1858 1859 SLIST_FOREACH(p, workq, pfrkt_workq) 1860 pfr_insert_ktable(p); 1861} 1862 1863void 1864pfr_insert_ktable(struct pfr_ktable *kt) 1865{ 1866 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); 1867 pfr_ktable_cnt++; 1868 if (kt->pfrkt_root != NULL) 1869 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1870 pfr_setflags_ktable(kt->pfrkt_root, 1871 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1872} 1873 1874void 1875pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1876{ 1877 struct pfr_ktable *p; 1878 1879 SLIST_FOREACH(p, workq, pfrkt_workq) 1880 pfr_setflags_ktable(p, p->pfrkt_nflags); 1881} 1882 1883void 1884pfr_setflags_ktable(struct 
pfr_ktable *kt, int newf) 1885{ 1886 struct pfr_kentryworkq addrq; 1887 1888 if (!(newf & PFR_TFLAG_REFERENCED) && 1889 !(newf & PFR_TFLAG_PERSIST)) 1890 newf &= ~PFR_TFLAG_ACTIVE; 1891 if (!(newf & PFR_TFLAG_ACTIVE)) 1892 newf &= ~PFR_TFLAG_USRMASK; 1893 if (!(newf & PFR_TFLAG_SETMASK)) { 1894 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt); 1895 if (kt->pfrkt_root != NULL) 1896 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1897 pfr_setflags_ktable(kt->pfrkt_root, 1898 kt->pfrkt_root->pfrkt_flags & 1899 ~PFR_TFLAG_REFDANCHOR); 1900 pfr_destroy_ktable(kt, 1); 1901 pfr_ktable_cnt--; 1902 return; 1903 } 1904 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1905 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1906 pfr_remove_kentries(kt, &addrq); 1907 } 1908 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1909 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1910 kt->pfrkt_shadow = NULL; 1911 } 1912 kt->pfrkt_flags = newf; 1913} 1914 1915void 1916pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1917{ 1918 struct pfr_ktable *p; 1919 1920 SLIST_FOREACH(p, workq, pfrkt_workq) 1921 pfr_clstats_ktable(p, tzero, recurse); 1922} 1923 1924void 1925pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1926{ 1927 struct pfr_kentryworkq addrq; 1928 int s; 1929 1930 if (recurse) { 1931 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1932 pfr_clstats_kentries(&addrq, tzero, 0); 1933 } 1934 s = splsoftnet(); 1935 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets)); 1936 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes)); 1937 kt->pfrkt_match = kt->pfrkt_nomatch = 0; 1938 splx(s); 1939 kt->pfrkt_tzero = tzero; 1940} 1941 1942struct pfr_ktable * 1943pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1944{ 1945 struct pfr_ktable *kt; 1946 struct pf_ruleset *rs; 1947 1948 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT); 1949 if (kt == NULL) 1950 return (NULL); 1951 bzero(kt, sizeof(*kt)); 1952 kt->pfrkt_t = *tbl; 1953 1954 if 
(attachruleset) { 1955 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor, 1956 tbl->pfrt_ruleset); 1957 if (!rs) { 1958 pfr_destroy_ktable(kt, 0); 1959 return (NULL); 1960 } 1961 kt->pfrkt_rs = rs; 1962 rs->tables++; 1963 if (rs->anchor != NULL) 1964 rs->anchor->tables++; 1965 } 1966 1967 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1968 offsetof(struct sockaddr_in, sin_addr) * 8) || 1969 !rn_inithead((void **)&kt->pfrkt_ip6, 1970 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1971 pfr_destroy_ktable(kt, 0); 1972 return (NULL); 1973 } 1974 kt->pfrkt_tzero = tzero; 1975 1976 return (kt); 1977} 1978 1979void 1980pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1981{ 1982 struct pfr_ktable *p, *q; 1983 1984 for (p = SLIST_FIRST(workq); p; p = q) { 1985 q = SLIST_NEXT(p, pfrkt_workq); 1986 pfr_destroy_ktable(p, flushaddr); 1987 } 1988} 1989 1990void 1991pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1992{ 1993 struct pfr_kentryworkq addrq; 1994 1995 if (flushaddr) { 1996 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1997 pfr_clean_node_mask(kt, &addrq); 1998 pfr_destroy_kentries(&addrq); 1999 } 2000#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100) 2001 if (kt->pfrkt_ip4 != NULL) { 2002 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4); 2003 free((caddr_t)kt->pfrkt_ip4, M_RTABLE); 2004 } 2005 if (kt->pfrkt_ip6 != NULL) { 2006 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6); 2007 free((caddr_t)kt->pfrkt_ip6, M_RTABLE); 2008 } 2009#else 2010 if (kt->pfrkt_ip4 != NULL) 2011 free((caddr_t)kt->pfrkt_ip4, M_RTABLE); 2012 if (kt->pfrkt_ip6 != NULL) 2013 free((caddr_t)kt->pfrkt_ip6, M_RTABLE); 2014#endif 2015 if (kt->pfrkt_shadow != NULL) 2016 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 2017 if (kt->pfrkt_rs != NULL) { 2018 kt->pfrkt_rs->tables--; 2019 if (kt->pfrkt_rs->anchor != NULL) 2020 kt->pfrkt_rs->anchor->tables--; 2021 pf_remove_if_empty_ruleset(kt->pfrkt_rs); 2022 } 2023 pool_put(&pfr_ktable_pl, kt); 2024} 2025 2026int 2027pfr_ktable_compare(struct 
pfr_ktable *p, struct pfr_ktable *q) 2028{ 2029 int d; 2030 2031 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 2032 return (d); 2033 if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor, 2034 PF_ANCHOR_NAME_SIZE))) 2035 return (d);
|
2094 return strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset, 2095 PF_RULESET_NAME_SIZE);
| 2036 return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset, 2037 PF_RULESET_NAME_SIZE));
|
2096} 2097 2098struct pfr_ktable * 2099pfr_lookup_table(struct pfr_table *tbl) 2100{ 2101 /* struct pfr_ktable start like a struct pfr_table */
| 2038} 2039 2040struct pfr_ktable * 2041pfr_lookup_table(struct pfr_table *tbl) 2042{ 2043 /* struct pfr_ktable start like a struct pfr_table */
|
2102 return RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
| 2044 return (RB_FIND(pfr_ktablehead, &pfr_ktables, 2045 (struct pfr_ktable *)tbl));
|
2103} 2104 2105int 2106pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2107{ 2108 struct pfr_kentry *ke = NULL; 2109 int match; 2110 2111 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2112 kt = kt->pfrkt_root; 2113 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
| 2046} 2047 2048int 2049pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2050{ 2051 struct pfr_kentry *ke = NULL; 2052 int match; 2053 2054 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2055 kt = kt->pfrkt_root; 2056 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
|
2114 return 0;
| 2057 return (0);
|
2115 2116 switch (af) { 2117 case AF_INET: 2118 pfr_sin.sin_addr.s_addr = a->addr32[0]; 2119 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); 2120 if (ke && KENTRY_RNF_ROOT(ke)) 2121 ke = NULL; 2122 break; 2123 case AF_INET6: 2124 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); 2125 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); 2126 if (ke && KENTRY_RNF_ROOT(ke)) 2127 ke = NULL; 2128 break; 2129 } 2130 match = (ke && !ke->pfrke_not); 2131 if (match) 2132 kt->pfrkt_match++; 2133 else 2134 kt->pfrkt_nomatch++; 2135 return (match); 2136} 2137 2138void 2139pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2140 u_int64_t len, int dir_out, int op_pass, int notrule) 2141{ 2142 struct pfr_kentry *ke = NULL; 2143 2144 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2145 kt = kt->pfrkt_root; 2146 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2147 return; 2148 2149 switch (af) { 2150 case AF_INET: 2151 pfr_sin.sin_addr.s_addr = a->addr32[0]; 2152 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); 2153 if (ke && KENTRY_RNF_ROOT(ke)) 2154 ke = NULL; 2155 break; 2156 case AF_INET6: 2157 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); 2158 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); 2159 if (ke && KENTRY_RNF_ROOT(ke)) 2160 ke = NULL; 2161 break; 2162 } 2163 if ((ke == NULL || ke->pfrke_not) != notrule) { 2164 if (op_pass != PFR_OP_PASS) 2165 printf("pfr_update_stats: assertion failed.\n"); 2166 op_pass = PFR_OP_XPASS; 2167 } 2168 kt->pfrkt_packets[dir_out][op_pass]++; 2169 kt->pfrkt_bytes[dir_out][op_pass] += len; 2170 if (ke != NULL && op_pass != PFR_OP_XPASS) { 2171 ke->pfrke_packets[dir_out][op_pass]++; 2172 ke->pfrke_bytes[dir_out][op_pass] += len; 2173 } 2174} 2175 2176struct pfr_ktable * 2177pfr_attach_table(struct pf_ruleset *rs, char *name) 2178{ 2179 struct pfr_ktable *kt, *rt; 2180 struct pfr_table tbl; 2181 struct pf_anchor *ac = rs->anchor; 2182 
2183 bzero(&tbl, sizeof(tbl)); 2184 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2185 if (ac != NULL) { 2186 strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor)); 2187 strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset)); 2188 } 2189 kt = pfr_lookup_table(&tbl); 2190 if (kt == NULL) { 2191#ifdef __FreeBSD__ 2192 /* 2193 * XXX Is it OK under LP64 environments? 2194 */ 2195 kt = pfr_create_ktable(&tbl, (long)time_second, 1); 2196#else 2197 kt = pfr_create_ktable(&tbl, time.tv_sec, 1); 2198#endif 2199 if (kt == NULL) 2200 return (NULL); 2201 if (ac != NULL) { 2202 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2203 bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset)); 2204 rt = pfr_lookup_table(&tbl); 2205 if (rt == NULL) { 2206 rt = pfr_create_ktable(&tbl, 0, 1); 2207 if (rt == NULL) { 2208 pfr_destroy_ktable(kt, 0); 2209 return (NULL); 2210 } 2211 pfr_insert_ktable(rt); 2212 } 2213 kt->pfrkt_root = rt; 2214 } 2215 pfr_insert_ktable(kt); 2216 } 2217 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2218 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
| 2058 2059 switch (af) { 2060 case AF_INET: 2061 pfr_sin.sin_addr.s_addr = a->addr32[0]; 2062 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); 2063 if (ke && KENTRY_RNF_ROOT(ke)) 2064 ke = NULL; 2065 break; 2066 case AF_INET6: 2067 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); 2068 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); 2069 if (ke && KENTRY_RNF_ROOT(ke)) 2070 ke = NULL; 2071 break; 2072 } 2073 match = (ke && !ke->pfrke_not); 2074 if (match) 2075 kt->pfrkt_match++; 2076 else 2077 kt->pfrkt_nomatch++; 2078 return (match); 2079} 2080 2081void 2082pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2083 u_int64_t len, int dir_out, int op_pass, int notrule) 2084{ 2085 struct pfr_kentry *ke = NULL; 2086 2087 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2088 kt = kt->pfrkt_root; 2089 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2090 return; 2091 2092 switch (af) { 2093 case AF_INET: 2094 pfr_sin.sin_addr.s_addr = a->addr32[0]; 2095 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); 2096 if (ke && KENTRY_RNF_ROOT(ke)) 2097 ke = NULL; 2098 break; 2099 case AF_INET6: 2100 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); 2101 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); 2102 if (ke && KENTRY_RNF_ROOT(ke)) 2103 ke = NULL; 2104 break; 2105 } 2106 if ((ke == NULL || ke->pfrke_not) != notrule) { 2107 if (op_pass != PFR_OP_PASS) 2108 printf("pfr_update_stats: assertion failed.\n"); 2109 op_pass = PFR_OP_XPASS; 2110 } 2111 kt->pfrkt_packets[dir_out][op_pass]++; 2112 kt->pfrkt_bytes[dir_out][op_pass] += len; 2113 if (ke != NULL && op_pass != PFR_OP_XPASS) { 2114 ke->pfrke_packets[dir_out][op_pass]++; 2115 ke->pfrke_bytes[dir_out][op_pass] += len; 2116 } 2117} 2118 2119struct pfr_ktable * 2120pfr_attach_table(struct pf_ruleset *rs, char *name) 2121{ 2122 struct pfr_ktable *kt, *rt; 2123 struct pfr_table tbl; 2124 struct pf_anchor *ac = rs->anchor; 2125 
2126 bzero(&tbl, sizeof(tbl)); 2127 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2128 if (ac != NULL) { 2129 strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor)); 2130 strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset)); 2131 } 2132 kt = pfr_lookup_table(&tbl); 2133 if (kt == NULL) { 2134#ifdef __FreeBSD__ 2135 /* 2136 * XXX Is it OK under LP64 environments? 2137 */ 2138 kt = pfr_create_ktable(&tbl, (long)time_second, 1); 2139#else 2140 kt = pfr_create_ktable(&tbl, time.tv_sec, 1); 2141#endif 2142 if (kt == NULL) 2143 return (NULL); 2144 if (ac != NULL) { 2145 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2146 bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset)); 2147 rt = pfr_lookup_table(&tbl); 2148 if (rt == NULL) { 2149 rt = pfr_create_ktable(&tbl, 0, 1); 2150 if (rt == NULL) { 2151 pfr_destroy_ktable(kt, 0); 2152 return (NULL); 2153 } 2154 pfr_insert_ktable(rt); 2155 } 2156 kt->pfrkt_root = rt; 2157 } 2158 pfr_insert_ktable(kt); 2159 } 2160 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2161 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
|
2219 return kt;
| 2162 return (kt);
|
2220} 2221 2222void 2223pfr_detach_table(struct pfr_ktable *kt) 2224{ 2225 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) 2226 printf("pfr_detach_table: refcount = %d.\n", 2227 kt->pfrkt_refcnt[PFR_REFCNT_RULE]); 2228 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2229 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2230} 2231 2232 2233int 2234pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2235 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af) 2236{ 2237 struct pfr_kentry *ke, *ke2; 2238 struct pf_addr *addr; 2239 union sockaddr_union mask; 2240 int idx = -1, use_counter = 0; 2241 2242 addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr : 2243 (struct pf_addr *)&pfr_sin6.sin6_addr; 2244 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2245 kt = kt->pfrkt_root; 2246 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2247 return (-1); 2248 2249 if (pidx != NULL) 2250 idx = *pidx; 2251 if (counter != NULL && idx >= 0) 2252 use_counter = 1; 2253 if (idx < 0) 2254 idx = 0; 2255 2256_next_block: 2257 ke = pfr_kentry_byidx(kt, idx, af); 2258 if (ke == NULL) 2259 return (1); 2260 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net); 2261 *raddr = SUNION2PF(&ke->pfrke_sa, af); 2262 *rmask = SUNION2PF(&pfr_mask, af); 2263 2264 if (use_counter) { 2265 /* is supplied address within block? */ 2266 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) { 2267 /* no, go to next block in table */ 2268 idx++; 2269 use_counter = 0; 2270 goto _next_block; 2271 } 2272 PF_ACPY(addr, counter, af); 2273 } else { 2274 /* use first address of block */ 2275 PF_ACPY(addr, *raddr, af); 2276 } 2277 2278 if (!KENTRY_NETWORK(ke)) { 2279 /* this is a single IP address - no possible nested block */ 2280 PF_ACPY(counter, addr, af); 2281 *pidx = idx; 2282 return (0); 2283 } 2284 for (;;) { 2285 /* we don't want to use a nested block */
| 2163} 2164 2165void 2166pfr_detach_table(struct pfr_ktable *kt) 2167{ 2168 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) 2169 printf("pfr_detach_table: refcount = %d.\n", 2170 kt->pfrkt_refcnt[PFR_REFCNT_RULE]); 2171 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2172 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2173} 2174 2175 2176int 2177pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2178 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af) 2179{ 2180 struct pfr_kentry *ke, *ke2; 2181 struct pf_addr *addr; 2182 union sockaddr_union mask; 2183 int idx = -1, use_counter = 0; 2184 2185 addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr : 2186 (struct pf_addr *)&pfr_sin6.sin6_addr; 2187 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2188 kt = kt->pfrkt_root; 2189 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2190 return (-1); 2191 2192 if (pidx != NULL) 2193 idx = *pidx; 2194 if (counter != NULL && idx >= 0) 2195 use_counter = 1; 2196 if (idx < 0) 2197 idx = 0; 2198 2199_next_block: 2200 ke = pfr_kentry_byidx(kt, idx, af); 2201 if (ke == NULL) 2202 return (1); 2203 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net); 2204 *raddr = SUNION2PF(&ke->pfrke_sa, af); 2205 *rmask = SUNION2PF(&pfr_mask, af); 2206 2207 if (use_counter) { 2208 /* is supplied address within block? */ 2209 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) { 2210 /* no, go to next block in table */ 2211 idx++; 2212 use_counter = 0; 2213 goto _next_block; 2214 } 2215 PF_ACPY(addr, counter, af); 2216 } else { 2217 /* use first address of block */ 2218 PF_ACPY(addr, *raddr, af); 2219 } 2220 2221 if (!KENTRY_NETWORK(ke)) { 2222 /* this is a single IP address - no possible nested block */ 2223 PF_ACPY(counter, addr, af); 2224 *pidx = idx; 2225 return (0); 2226 } 2227 for (;;) { 2228 /* we don't want to use a nested block */
|
2286 ke2 = (struct pfr_kentry *)(af == AF_INET ?
| 2229 ke2 = (struct pfr_kentry *)(af == AF_INET ?
|
2287 rn_match(&pfr_sin, kt->pfrkt_ip4) : 2288 rn_match(&pfr_sin6, kt->pfrkt_ip6)); 2289 /* no need to check KENTRY_RNF_ROOT() here */ 2290 if (ke2 == ke) { 2291 /* lookup return the same block - perfect */ 2292 PF_ACPY(counter, addr, af); 2293 *pidx = idx; 2294 return (0); 2295 } 2296 2297 /* we need to increase the counter past the nested block */ 2298 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net); 2299 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af); 2300 PF_AINC(addr, af); 2301 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) { 2302 /* ok, we reached the end of our main block */ 2303 /* go to next block in table */ 2304 idx++; 2305 use_counter = 0; 2306 goto _next_block; 2307 } 2308 } 2309} 2310 2311struct pfr_kentry * 2312pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2313{ 2314 struct pfr_walktree w; 2315
| 2230 rn_match(&pfr_sin, kt->pfrkt_ip4) : 2231 rn_match(&pfr_sin6, kt->pfrkt_ip6)); 2232 /* no need to check KENTRY_RNF_ROOT() here */ 2233 if (ke2 == ke) { 2234 /* lookup return the same block - perfect */ 2235 PF_ACPY(counter, addr, af); 2236 *pidx = idx; 2237 return (0); 2238 } 2239 2240 /* we need to increase the counter past the nested block */ 2241 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net); 2242 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af); 2243 PF_AINC(addr, af); 2244 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) { 2245 /* ok, we reached the end of our main block */ 2246 /* go to next block in table */ 2247 idx++; 2248 use_counter = 0; 2249 goto _next_block; 2250 } 2251 } 2252} 2253 2254struct pfr_kentry * 2255pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2256{ 2257 struct pfr_walktree w; 2258
|
2316 bzero(&w, sizeof(w)); 2317 w.pfrw_op = PFRW_POOL_GET; 2318 w.pfrw_cnt = idx;
| 2259 bzero(&w, sizeof(w)); 2260 w.pfrw_op = PFRW_POOL_GET; 2261 w.pfrw_cnt = idx;
|
2319
| 2262
|
2320 switch(af) {
| 2263 switch (af) {
|
2321 case AF_INET: 2322#ifdef __FreeBSD__ 2323 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2324#else 2325 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2326#endif
| 2264 case AF_INET: 2265#ifdef __FreeBSD__ 2266 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2267#else 2268 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2269#endif
|
2327 return w.pfrw_kentry;
| 2270 return (w.pfrw_kentry);
|
2328 case AF_INET6: 2329#ifdef __FreeBSD__ 2330 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2331#else 2332 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2333#endif
| 2271 case AF_INET6: 2272#ifdef __FreeBSD__ 2273 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2274#else 2275 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2276#endif
|
2334 return w.pfrw_kentry;
| 2277 return (w.pfrw_kentry);
|
2335 default:
| 2278 default:
|
2336 return NULL;
| 2279 return (NULL);
|
2337 } 2338}
| 2280 } 2281}
|
| 2282 2283void 2284pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn) 2285{ 2286 struct pfr_walktree w; 2287 int s; 2288 2289 bzero(&w, sizeof(w)); 2290 w.pfrw_op = PFRW_DYNADDR_UPDATE; 2291 w.pfrw_dyn = dyn; 2292 2293 s = splsoftnet(); 2294 dyn->pfid_acnt4 = 0; 2295 dyn->pfid_acnt6 = 0; 2296 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) 2297#ifdef __FreeBSD__ 2298 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2299#else 2300 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w); 2301#endif 2302 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) 2303#ifdef __FreeBSD__ 2304 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2305#else 2306 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w); 2307#endif 2308 splx(s); 2309}
|
| |