/* pf_ioctl.c, revision 135196 */
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 135196 2004-09-14 03:12:01Z mlaier $ */ 2/* $OpenBSD: pf_ioctl.c,v 1.112.2.2 2004/07/24 18:28:12 brad Exp $ */ 3 4/* 5 * Copyright (c) 2001 Daniel Hartmeier 6 * Copyright (c) 2002,2003 Henning Brauer 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
36 * 37 */ 38 39#ifdef __FreeBSD__ 40#include "opt_inet.h" 41#include "opt_inet6.h" 42#endif 43 44#ifdef __FreeBSD__ 45#include "opt_bpf.h" 46#include "opt_pf.h" 47#define NBPFILTER DEV_BPF 48#define NPFLOG DEV_PFLOG 49#define NPFSYNC DEV_PFSYNC 50#else 51#include "bpfilter.h" 52#include "pflog.h" 53#include "pfsync.h" 54#endif 55 56#include <sys/param.h> 57#include <sys/systm.h> 58#include <sys/mbuf.h> 59#include <sys/filio.h> 60#include <sys/fcntl.h> 61#include <sys/socket.h> 62#include <sys/socketvar.h> 63#include <sys/kernel.h> 64#include <sys/time.h> 65#include <sys/malloc.h> 66#ifdef __FreeBSD__ 67#include <sys/module.h> 68#include <sys/conf.h> 69#include <sys/proc.h> 70#else 71#include <sys/timeout.h> 72#include <sys/pool.h> 73#endif 74 75#include <net/if.h> 76#include <net/if_types.h> 77#include <net/route.h> 78 79#include <netinet/in.h> 80#include <netinet/in_var.h> 81#include <netinet/in_systm.h> 82#include <netinet/ip.h> 83#include <netinet/ip_var.h> 84#include <netinet/ip_icmp.h> 85 86#ifndef __FreeBSD__ 87#include <dev/rndvar.h> 88#endif 89#include <net/pfvar.h> 90 91#if NPFSYNC > 0 92#include <net/if_pfsync.h> 93#endif /* NPFSYNC > 0 */ 94 95#ifdef INET6 96#include <netinet/ip6.h> 97#include <netinet/in_pcb.h> 98#endif /* INET6 */ 99 100#ifdef ALTQ 101#include <altq/altq.h> 102#endif 103 104#ifdef __FreeBSD__ 105#include <sys/limits.h> 106#include <sys/lock.h> 107#include <sys/mutex.h> 108#include <net/pfil.h> 109#endif /* __FreeBSD__ */ 110 111#ifdef __FreeBSD__ 112void init_zone_var(void); 113void cleanup_pf_zone(void); 114int pfattach(void); 115#else 116void pfattach(int); 117int pfopen(struct cdev *, int, int, struct proc *); 118int pfclose(struct cdev *, int, int, struct proc *); 119#endif 120struct pf_pool *pf_get_pool(char *, char *, u_int32_t, 121 u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t); 122int pf_get_ruleset_number(u_int8_t); 123void pf_init_ruleset(struct pf_ruleset *); 124void pf_mv_pool(struct pf_palist *, struct pf_palist *); 
void		 pf_empty_pool(struct pf_palist *);
#ifdef __FreeBSD__
int		 pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
#else
int		 pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *);
#endif
#ifdef ALTQ
int		 pf_begin_altq(u_int32_t *);
int		 pf_rollback_altq(u_int32_t);
int		 pf_commit_altq(u_int32_t);
#endif /* ALTQ */
int		 pf_begin_rules(u_int32_t *, int, char *, char *);
int		 pf_rollback_rules(u_int32_t, int, char *, char *);
int		 pf_commit_rules(u_int32_t, int, char *, char *);

/* Periodic state-purge timer; armed in pfattach(). */
#ifdef __FreeBSD__
extern struct callout	pf_expire_to;
#else
extern struct timeout	pf_expire_to;
#endif

/* Catch-all PASS rule applied when no configured rule matches. */
struct pf_rule		pf_default_rule;

/* Highest tag/queue id handed out by tagname2tag(). */
#define TAGID_MAX	50000
/*
 * Name<->id mappings for rule tags and (sharing the same allocator)
 * ALTQ queue ids; both lists are kept sorted by id.
 */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/* Queue ids reuse the tag machinery, so the name sizes must agree. */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	tagname2tag(struct pf_tags *, char *);
static void		tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		tag_unref(struct pf_tags *, u_int16_t);

/* Debug printf, emitted only when pf_status.debug is at least (n). */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x


#ifdef __FreeBSD__
static struct cdev	*pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(void);
/*
 * XXX - These are new and need to be checked when moving to a new version
 */

/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int
    dir);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

/* Character-device switch for /dev/pf; all work happens in pfioctl(). */
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

/* Nonzero once the pfil(9) input/output hooks have been registered. */
static volatile int pf_pfil_hooked = 0;
struct mtx pf_task_mtx;

/* Initialize the global pf task mutex. */
void
init_pf_mutex(void)
{
	mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
}

/* Destroy the global pf task mutex. */
void
destroy_pf_mutex(void)
{
	mtx_destroy(&pf_task_mtx);
}

/* Reset all UMA zone pointers so cleanup_pf_zone() is safe to call. */
void
init_zone_var(void)
{
	pf_src_tree_pl = pf_rule_pl = NULL;
	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
	pf_state_scrub_pl = NULL;
	pfr_ktable_pl = pfr_kentry_pl = NULL;
}

/* Tear down every UMA zone created by pfattach(). */
void
cleanup_pf_zone(void)
{
	UMA_DESTROY(pf_src_tree_pl);
	UMA_DESTROY(pf_rule_pl);
	UMA_DESTROY(pf_state_pl);
	UMA_DESTROY(pf_altq_pl);
	UMA_DESTROY(pf_pooladdr_pl);
	UMA_DESTROY(pf_frent_pl);
	UMA_DESTROY(pf_frag_pl);
	UMA_DESTROY(pf_cache_pl);
	UMA_DESTROY(pf_cent_pl);
	UMA_DESTROY(pfr_ktable_pl);
	UMA_DESTROY(pfr_kentry_pl);
	UMA_DESTROY(pf_state_scrub_pl);
	UMA_DESTROY(pfi_addr_pl);
}

/*
 * FreeBSD attach: create the UMA zones, initialize the table/interface/
 * OS-fingerprint subsystems, set pool limits, default rule and default
 * timeouts, and arm the periodic purge callout.  Returns 0 on success.
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	/*
	 * NOTE(review): UMA_CREATE presumably bails out of this do/while
	 * on allocation failure, leaving error == 1 — confirm against the
	 * macro definition in pfvar.h.
	 */
	do {
		UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
		UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
		UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
		UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
		UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
		UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
		UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
		UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
		UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	if ( (error = pf_osfp_initialize()) ) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit);

	RB_INIT(&tree_src_tracking);
	TAILQ_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts (seconds) */
	my_timeout[PFTM_TCP_FIRST_PACKET] = 120;	/* First TCP packet */
	my_timeout[PFTM_TCP_OPENING] = 30;		/* No response yet */
	my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
	my_timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
	my_timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
	my_timeout[PFTM_TCP_CLOSED] = 90;		/* Got a RST */
	my_timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
	my_timeout[PFTM_UDP_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_ICMP_FIRST_PACKET] = 20;	/* First ICMP packet */
	my_timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
	my_timeout[PFTM_OTHER_FIRST_PACKET] = 60;	/* First packet */
	my_timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_FRAG] = 30;			/* Fragment expire */
	my_timeout[PFTM_INTERVAL] = 10;			/* Expire interval */

	/*
	 * XXX
	 * The 2nd arg. 0 to callout_init(9) should be set to CALLOUT_MPSAFE
	 * if Giant lock is removed from the network stack.
	 */
	callout_init(&pf_expire_to, 0);
	callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);

	pf_normalize_init();
	pf_status.debug = PF_DEBUG_URGENT;
	pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	return (error);
}
#else /* !__FreeBSD__ */
/*
 * OpenBSD attach: same initialization sequence using pool(9) instead of
 * UMA zones and timeout(9) instead of callout(9).
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	TAILQ_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts (seconds) */
	timeout[PFTM_TCP_FIRST_PACKET] = 120;		/* First TCP packet */
	timeout[PFTM_TCP_OPENING] = 30;			/* No response yet */
	timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
	timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
	timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
	timeout[PFTM_TCP_CLOSED] = 90;			/* Got a RST */
	timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
	timeout[PFTM_UDP_SINGLE] = 30;			/* Unidirectional */
	timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
	timeout[PFTM_ICMP_FIRST_PACKET] = 20;		/* First ICMP packet */
	timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
	timeout[PFTM_OTHER_FIRST_PACKET] = 60;		/* First packet */
	timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
	timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
	timeout[PFTM_FRAG] = 30;			/* Fragment expire */
	timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
	timeout[PFTM_SRC_NODE] = 0;			/* Source tracking */

	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}

/* Open /dev/pf; only minor 0 exists. */
int
pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/* Close /dev/pf; only minor 0 exists. */
int
pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}
#endif /* __FreeBSD__ */

/*
 * Return the address pool of the rule selected by anchor/ruleset name,
 * ruleset kind (derived from rule_action) and rule number, from either
 * the active or the inactive rule list.  With r_last set the last rule
 * is taken regardless of rule_number.  When check_ticket is set the
 * caller's ticket must match the list's ticket.  Returns NULL when no
 * such rule exists.
 */
struct pf_pool *
pf_get_pool(char *anchorname, char *rulesetname, u_int32_t ticket,
    u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last,
    u_int8_t active, u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchorname, rulesetname);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* linear scan for the rule with the requested number */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/*
 * Map a rule action to its ruleset kind; unknown actions map to
 * PF_RULESET_MAX, which callers treat as invalid.
 */
int
pf_get_ruleset_number(u_int8_t action)
{
	switch (action) {
	case PF_SCRUB:
		return (PF_RULESET_SCRUB);
		break;
	case PF_PASS:
	case PF_DROP:
		return (PF_RULESET_FILTER);
		break;
	case PF_NAT:
	case PF_NONAT:
		return (PF_RULESET_NAT);
		break;
	case PF_BINAT:
	case PF_NOBINAT:
		return (PF_RULESET_BINAT);
		break;
	case PF_RDR:
	case PF_NORDR:
		return (PF_RULESET_RDR);
		break;
	default:
		return (PF_RULESET_MAX);
		break;
	}
}

/*
 * Zero a ruleset and point each kind's active/inactive lists at its
 * two rule queues.
 */
void
pf_init_ruleset(struct pf_ruleset *ruleset)
{
	int	i;

	memset(ruleset, 0, sizeof(struct pf_ruleset));
	for (i = 0; i < PF_RULESET_MAX; i++) {
		TAILQ_INIT(&ruleset->rules[i].queues[0]);
		TAILQ_INIT(&ruleset->rules[i].queues[1]);
		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
	}
}
/*
 * Look up an anchor by name in the sorted pf_anchors list; returns NULL
 * when not found.
 */
struct pf_anchor *
pf_find_anchor(const char *anchorname)
{
	struct pf_anchor	*anchor;
	int			 n = -1;

	anchor = TAILQ_FIRST(&pf_anchors);
	while (anchor != NULL && (n = strcmp(anchor->name, anchorname)) < 0)
		anchor = TAILQ_NEXT(anchor, entries);
	if (n == 0)
		return (anchor);
	else
		return (NULL);
}

/*
 * Look up a ruleset by anchor and ruleset name.  Empty names select the
 * main ruleset; exactly one empty name is invalid.  NOTE: forcibly
 * NUL-terminates the caller's name buffers in place.
 */
struct pf_ruleset *
pf_find_ruleset(char *anchorname, char *rulesetname)
{
	struct pf_anchor	*anchor;
	struct pf_ruleset	*ruleset;

	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
	anchor = pf_find_anchor(anchorname);
	if (anchor == NULL)
		return (NULL);
	ruleset = TAILQ_FIRST(&anchor->rulesets);
	while (ruleset != NULL && strcmp(ruleset->name, rulesetname) < 0)
		ruleset = TAILQ_NEXT(ruleset, entries);
	if (ruleset != NULL && !strcmp(ruleset->name, rulesetname))
		return (ruleset);
	else
		return (NULL);
}

/*
 * Like pf_find_ruleset(), but allocates the anchor and/or ruleset when
 * missing, keeping both lists sorted by name.  Returns NULL on invalid
 * names or allocation failure.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(char anchorname[PF_ANCHOR_NAME_SIZE],
    char rulesetname[PF_RULESET_NAME_SIZE])
{
	struct pf_anchor	*anchor, *a;
	struct pf_ruleset	*ruleset, *r;

	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
	a = TAILQ_FIRST(&pf_anchors);
	while (a != NULL && strcmp(a->name, anchorname) < 0)
		a = TAILQ_NEXT(a, entries);
	if (a != NULL && !strcmp(a->name, anchorname))
		anchor = a;
	else {
		/* no such anchor yet: allocate and insert in sorted order */
		anchor = (struct pf_anchor *)malloc(sizeof(struct pf_anchor),
		    M_TEMP, M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(struct pf_anchor));
		bcopy(anchorname, anchor->name, sizeof(anchor->name));
		TAILQ_INIT(&anchor->rulesets);
		if (a != NULL)
			TAILQ_INSERT_BEFORE(a, anchor, entries);
		else
			TAILQ_INSERT_TAIL(&pf_anchors, anchor, entries);
	}
	r = TAILQ_FIRST(&anchor->rulesets);
	while (r != NULL && strcmp(r->name, rulesetname) < 0)
		r = TAILQ_NEXT(r, entries);
	if (r != NULL && !strcmp(r->name, rulesetname))
		return (r);
	ruleset = (struct pf_ruleset *)malloc(sizeof(struct pf_ruleset),
	    M_TEMP, M_NOWAIT);
	if (ruleset != NULL) {
		pf_init_ruleset(ruleset);
		bcopy(rulesetname, ruleset->name, sizeof(ruleset->name));
		ruleset->anchor = anchor;
		if (r != NULL)
			TAILQ_INSERT_BEFORE(r, ruleset, entries);
		else
			TAILQ_INSERT_TAIL(&anchor->rulesets, ruleset, entries);
	}
	return (ruleset);
}

/*
 * Free a ruleset (and its anchor, when that becomes empty) once it holds
 * no rules, tables or open transactions.  No-op for the main ruleset
 * (anchor == NULL).
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*anchor;
	int			 i;

	if (ruleset == NULL || ruleset->anchor == NULL || ruleset->tables > 0 ||
	    ruleset->topen)
		return;
	for (i = 0; i < PF_RULESET_MAX; ++i)
		if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
		    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
		    ruleset->rules[i].inactive.open)
			return;

	anchor = ruleset->anchor;
	TAILQ_REMOVE(&anchor->rulesets, ruleset, entries);
	free(ruleset, M_TEMP);

	if (TAILQ_EMPTY(&anchor->rulesets)) {
		TAILQ_REMOVE(&pf_anchors, anchor, entries);
		free(anchor, M_TEMP);
		pf_update_anchor_rules();
	}
}

/* Move every pool address from poola to the tail of poolb. */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/*
 * Release every pool address in poola: drop dynaddr/table references,
 * detach the interface and return the entry to its pool.
 */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_detach_rule(empty_pool_pa->kif);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

/*
 * Unlink a rule from rulequeue (when non-NULL) and free it once nothing
 * references it anymore (no states, no source nodes, not on a queue).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced: keep the rule around, just unlinked */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
	}
	pfi_detach_rule(rule->kif);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Return the id for tagname, bumping its refcount; allocate a new entry
 * (reusing the lowest free id) when the name is unknown.  Returns 0 on
 * allocation failure or id exhaustion.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/*
 * Copy the name registered for tagid into p (PF_TAG_NAME_SIZE bytes);
 * p is left untouched when the id is unknown.
 */
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* Drop one reference on tag, freeing the entry at refcount zero. */
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

/* Public wrappers operating on the rule-tag list. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	return (tag2tagname(&pf_tags, tagid, p));
}

void
pf_tag_unref(u_int16_t tag)
{
	return (tag_unref(&pf_tags, tag));
}

#ifdef ALTQ
/* Queue-id wrappers operating on the pf_qids list. */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
}

void
pf_qid_unref(u_int32_t qid)
{
	return (tag_unref(&pf_qids, (u_int16_t)qid));
}

/*
 * Begin an ALTQ transaction: purge the inactive altq list and hand the
 * caller a new ticket.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

/*
 * Abort an open ALTQ transaction matching ticket, discarding the
 * inactive altq list.  A stale ticket is silently ignored.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

/*
 * Commit an open ALTQ transaction: swap the active and inactive altq
 * lists, attach the new disciplines and purge the old list.  Returns
 * EBUSY on a ticket mismatch.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
#endif /* ALTQ */

/*
 * Begin a rule transaction on the given ruleset kind: clear the inactive
 * rule list and hand the caller a new ticket.  Creates the ruleset when
 * necessary.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor, ruleset);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort an open rule transaction matching ticket, discarding the
 * inactive rule list.  A stale ticket is silently ignored.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr))
	    != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Commit an open rule transaction: swap the active and inactive rule
 * lists, recompute skip steps, purge the replaced rules and drop the
 * ruleset if it became empty.  Returns EBUSY on a ticket mismatch.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	pf_update_anchor_rules();
	splx(s);
	return (0);
}

/*
 * The /dev/pf ioctl handler: every pf configuration operation goes
 * through here.  The two leading switches restrict which commands are
 * allowed at elevated securelevel and on read-only file descriptors.
 */
#ifdef __FreeBSD__
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 1))
#else
	if (securelevel > 1)
#endif
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
1002 case DIOCGETLIMIT: 1003 case DIOCGETALTQS: 1004 case DIOCGETALTQ: 1005 case DIOCGETQSTATS: 1006 case DIOCGETANCHORS: 1007 case DIOCGETANCHOR: 1008 case DIOCGETRULESETS: 1009 case DIOCGETRULESET: 1010 case DIOCRGETTABLES: 1011 case DIOCRGETTSTATS: 1012 case DIOCRCLRTSTATS: 1013 case DIOCRCLRADDRS: 1014 case DIOCRADDADDRS: 1015 case DIOCRDELADDRS: 1016 case DIOCRSETADDRS: 1017 case DIOCRGETADDRS: 1018 case DIOCRGETASTATS: 1019 case DIOCRCLRASTATS: 1020 case DIOCRTSTADDRS: 1021 case DIOCOSFPGET: 1022 case DIOCGETSRCNODES: 1023 case DIOCCLRSRCNODES: 1024 case DIOCIGETIFACES: 1025 case DIOCICLRISTATS: 1026#ifdef __FreeBSD__ 1027 case DIOCGIFSPEED: 1028#endif 1029 break; 1030 case DIOCRCLRTABLES: 1031 case DIOCRADDTABLES: 1032 case DIOCRDELTABLES: 1033 case DIOCRSETTFLAGS: 1034 if (((struct pfioc_table *)addr)->pfrio_flags & 1035 PFR_FLAG_DUMMY) 1036 break; /* dummy operation ok */ 1037 return (EPERM); 1038 default: 1039 return (EPERM); 1040 } 1041 1042 if (!(flags & FWRITE)) 1043 switch (cmd) { 1044 case DIOCGETRULES: 1045 case DIOCGETRULE: 1046 case DIOCGETADDRS: 1047 case DIOCGETADDR: 1048 case DIOCGETSTATE: 1049 case DIOCGETSTATUS: 1050 case DIOCGETSTATES: 1051 case DIOCGETTIMEOUT: 1052 case DIOCGETLIMIT: 1053 case DIOCGETALTQS: 1054 case DIOCGETALTQ: 1055 case DIOCGETQSTATS: 1056 case DIOCGETANCHORS: 1057 case DIOCGETANCHOR: 1058 case DIOCGETRULESETS: 1059 case DIOCGETRULESET: 1060 case DIOCRGETTABLES: 1061 case DIOCRGETTSTATS: 1062 case DIOCRGETADDRS: 1063 case DIOCRGETASTATS: 1064 case DIOCRTSTADDRS: 1065 case DIOCOSFPGET: 1066 case DIOCGETSRCNODES: 1067 case DIOCIGETIFACES: 1068#ifdef __FreeBSD__ 1069 case DIOCGIFSPEED: 1070#endif 1071 break; 1072 case DIOCRCLRTABLES: 1073 case DIOCRADDTABLES: 1074 case DIOCRDELTABLES: 1075 case DIOCRCLRTSTATS: 1076 case DIOCRCLRADDRS: 1077 case DIOCRADDADDRS: 1078 case DIOCRDELADDRS: 1079 case DIOCRSETADDRS: 1080 case DIOCRSETTFLAGS: 1081 if (((struct pfioc_table *)addr)->pfrio_flags & 1082 PFR_FLAG_DUMMY) 1083 break; /* 
dummy operation ok */ 1084 return (EACCES); 1085 default: 1086 return (EACCES); 1087 } 1088 1089#ifdef __FreeBSD__ 1090 PF_LOCK(); 1091#endif 1092 1093 switch (cmd) { 1094 1095 case DIOCSTART: 1096 if (pf_status.running) 1097 error = EEXIST; 1098 else { 1099#ifdef __FreeBSD__ 1100 PF_UNLOCK(); 1101 error = hook_pf(); 1102 PF_LOCK(); 1103 if (error) { 1104 DPFPRINTF(PF_DEBUG_MISC, 1105 ("pf: pfil registeration fail\n")); 1106 break; 1107 } 1108#endif 1109 pf_status.running = 1; 1110#ifdef __FreeBSD__ 1111 pf_status.since = time_second; 1112#else 1113 pf_status.since = time.tv_sec; 1114#endif 1115 if (pf_status.stateid == 0) { 1116#ifdef __FreeBSD__ 1117 pf_status.stateid = time_second; 1118#else 1119 pf_status.stateid = time.tv_sec; 1120#endif 1121 pf_status.stateid = pf_status.stateid << 32; 1122 } 1123 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1124 } 1125 break; 1126 1127 case DIOCSTOP: 1128 if (!pf_status.running) 1129 error = ENOENT; 1130 else { 1131 pf_status.running = 0; 1132#ifdef __FreeBSD__ 1133 PF_UNLOCK(); 1134 error = dehook_pf(); 1135 PF_LOCK(); 1136 if (error) { 1137 pf_status.running = 1; 1138 DPFPRINTF(PF_DEBUG_MISC, 1139 ("pf: pfil unregisteration failed\n")); 1140 } 1141 pf_status.since = time_second; 1142#else 1143 pf_status.since = time.tv_sec; 1144#endif 1145 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1146 } 1147 break; 1148 1149 case DIOCBEGINRULES: { 1150 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1151 1152 error = pf_begin_rules(&pr->ticket, pf_get_ruleset_number( 1153 pr->rule.action), pr->anchor, pr->ruleset); 1154 break; 1155 } 1156 1157 case DIOCADDRULE: { 1158 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1159 struct pf_ruleset *ruleset; 1160 struct pf_rule *rule, *tail; 1161 struct pf_pooladdr *pa; 1162 int rs_num; 1163 1164 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1165 if (ruleset == NULL) { 1166 error = EINVAL; 1167 break; 1168 } 1169 rs_num = pf_get_ruleset_number(pr->rule.action); 1170 if (rs_num >= 
PF_RULESET_MAX) { 1171 error = EINVAL; 1172 break; 1173 } 1174 if (pr->rule.anchorname[0] && ruleset != &pf_main_ruleset) { 1175 error = EINVAL; 1176 break; 1177 } 1178 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1179 error = EINVAL; 1180 break; 1181 } 1182 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1183 error = EBUSY; 1184 break; 1185 } 1186 if (pr->pool_ticket != ticket_pabuf) { 1187 error = EBUSY; 1188 break; 1189 } 1190 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1191 if (rule == NULL) { 1192 error = ENOMEM; 1193 break; 1194 } 1195 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1196 rule->anchor = NULL; 1197 rule->kif = NULL; 1198 TAILQ_INIT(&rule->rpool.list); 1199 /* initialize refcounting */ 1200 rule->states = 0; 1201 rule->src_nodes = 0; 1202 rule->entries.tqe_prev = NULL; 1203#ifndef INET 1204 if (rule->af == AF_INET) { 1205 pool_put(&pf_rule_pl, rule); 1206 error = EAFNOSUPPORT; 1207 break; 1208 } 1209#endif /* INET */ 1210#ifndef INET6 1211 if (rule->af == AF_INET6) { 1212 pool_put(&pf_rule_pl, rule); 1213 error = EAFNOSUPPORT; 1214 break; 1215 } 1216#endif /* INET6 */ 1217 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1218 pf_rulequeue); 1219 if (tail) 1220 rule->nr = tail->nr + 1; 1221 else 1222 rule->nr = 0; 1223 if (rule->ifname[0]) { 1224 rule->kif = pfi_attach_rule(rule->ifname); 1225 if (rule->kif == NULL) { 1226 pool_put(&pf_rule_pl, rule); 1227 error = EINVAL; 1228 break; 1229 } 1230 } 1231 1232#ifdef ALTQ 1233 /* set queue IDs */ 1234 if (rule->qname[0] != 0) { 1235 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1236 error = EBUSY; 1237 else if (rule->pqname[0] != 0) { 1238 if ((rule->pqid = 1239 pf_qname2qid(rule->pqname)) == 0) 1240 error = EBUSY; 1241 } else 1242 rule->pqid = rule->qid; 1243 } 1244#endif 1245 if (rule->tagname[0]) 1246 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1247 error = EBUSY; 1248 if (rule->match_tagname[0]) 1249 if ((rule->match_tag = 1250 pf_tagname2tag(rule->match_tagname)) 
== 0) 1251 error = EBUSY; 1252 if (rule->rt && !rule->direction) 1253 error = EINVAL; 1254 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1255 error = EINVAL; 1256 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1257 error = EINVAL; 1258 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1259 error = EINVAL; 1260 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1261 error = EINVAL; 1262 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1263 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1264 error = EINVAL; 1265 1266 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1267 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1268 (rule->action == PF_BINAT)) && !rule->anchorname[0]) || 1269 (rule->rt > PF_FASTROUTE)) && 1270 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1271 error = EINVAL; 1272 1273 if (error) { 1274 pf_rm_rule(NULL, rule); 1275 break; 1276 } 1277 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1278 rule->evaluations = rule->packets = rule->bytes = 0; 1279 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1280 rule, entries); 1281 break; 1282 } 1283 1284 case DIOCCOMMITRULES: { 1285 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1286 1287 error = pf_commit_rules(pr->ticket, pf_get_ruleset_number( 1288 pr->rule.action), pr->anchor, pr->ruleset); 1289 break; 1290 } 1291 1292 case DIOCGETRULES: { 1293 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1294 struct pf_ruleset *ruleset; 1295 struct pf_rule *tail; 1296 int rs_num; 1297 1298 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1299 if (ruleset == NULL) { 1300 error = EINVAL; 1301 break; 1302 } 1303 rs_num = pf_get_ruleset_number(pr->rule.action); 1304 if (rs_num >= PF_RULESET_MAX) { 1305 error = EINVAL; 1306 break; 1307 } 1308 s = splsoftnet(); 1309 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1310 pf_rulequeue); 1311 if (tail) 1312 pr->nr = tail->nr + 1; 1313 else 1314 pr->nr = 0; 1315 pr->ticket = ruleset->rules[rs_num].active.ticket; 1316 splx(s); 1317 break; 1318 } 1319 1320 case 
DIOCGETRULE: { 1321 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1322 struct pf_ruleset *ruleset; 1323 struct pf_rule *rule; 1324 int rs_num, i; 1325 1326 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1327 if (ruleset == NULL) { 1328 error = EINVAL; 1329 break; 1330 } 1331 rs_num = pf_get_ruleset_number(pr->rule.action); 1332 if (rs_num >= PF_RULESET_MAX) { 1333 error = EINVAL; 1334 break; 1335 } 1336 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1337 error = EBUSY; 1338 break; 1339 } 1340 s = splsoftnet(); 1341 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1342 while ((rule != NULL) && (rule->nr != pr->nr)) 1343 rule = TAILQ_NEXT(rule, entries); 1344 if (rule == NULL) { 1345 error = EBUSY; 1346 splx(s); 1347 break; 1348 } 1349 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1350 pfi_dynaddr_copyout(&pr->rule.src.addr); 1351 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1352 pf_tbladdr_copyout(&pr->rule.src.addr); 1353 pf_tbladdr_copyout(&pr->rule.dst.addr); 1354 for (i = 0; i < PF_SKIP_COUNT; ++i) 1355 if (rule->skip[i].ptr == NULL) 1356 pr->rule.skip[i].nr = -1; 1357 else 1358 pr->rule.skip[i].nr = 1359 rule->skip[i].ptr->nr; 1360 splx(s); 1361 break; 1362 } 1363 1364 case DIOCCHANGERULE: { 1365 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1366 struct pf_ruleset *ruleset; 1367 struct pf_rule *oldrule = NULL, *newrule = NULL; 1368 u_int32_t nr = 0; 1369 int rs_num; 1370 1371 if (!(pcr->action == PF_CHANGE_REMOVE || 1372 pcr->action == PF_CHANGE_GET_TICKET) && 1373 pcr->pool_ticket != ticket_pabuf) { 1374 error = EBUSY; 1375 break; 1376 } 1377 1378 if (pcr->action < PF_CHANGE_ADD_HEAD || 1379 pcr->action > PF_CHANGE_GET_TICKET) { 1380 error = EINVAL; 1381 break; 1382 } 1383 ruleset = pf_find_ruleset(pcr->anchor, pcr->ruleset); 1384 if (ruleset == NULL) { 1385 error = EINVAL; 1386 break; 1387 } 1388 rs_num = pf_get_ruleset_number(pcr->rule.action); 1389 if (rs_num >= PF_RULESET_MAX) { 1390 error = EINVAL; 1391 break; 1392 } 1393 
1394 if (pcr->action == PF_CHANGE_GET_TICKET) { 1395 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1396 break; 1397 } else { 1398 if (pcr->ticket != 1399 ruleset->rules[rs_num].active.ticket) { 1400 error = EINVAL; 1401 break; 1402 } 1403 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1404 error = EINVAL; 1405 break; 1406 } 1407 } 1408 1409 if (pcr->action != PF_CHANGE_REMOVE) { 1410 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1411 if (newrule == NULL) { 1412 error = ENOMEM; 1413 break; 1414 } 1415 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1416 TAILQ_INIT(&newrule->rpool.list); 1417 /* initialize refcounting */ 1418 newrule->states = 0; 1419 newrule->entries.tqe_prev = NULL; 1420#ifndef INET 1421 if (newrule->af == AF_INET) { 1422 pool_put(&pf_rule_pl, newrule); 1423 error = EAFNOSUPPORT; 1424 break; 1425 } 1426#endif /* INET */ 1427#ifndef INET6 1428 if (newrule->af == AF_INET6) { 1429 pool_put(&pf_rule_pl, newrule); 1430 error = EAFNOSUPPORT; 1431 break; 1432 } 1433#endif /* INET6 */ 1434 if (newrule->ifname[0]) { 1435 newrule->kif = pfi_attach_rule(newrule->ifname); 1436 if (newrule->kif == NULL) { 1437 pool_put(&pf_rule_pl, newrule); 1438 error = EINVAL; 1439 break; 1440 } 1441 } else 1442 newrule->kif = NULL; 1443 1444#ifdef ALTQ 1445 /* set queue IDs */ 1446 if (newrule->qname[0] != 0) { 1447 if ((newrule->qid = 1448 pf_qname2qid(newrule->qname)) == 0) 1449 error = EBUSY; 1450 else if (newrule->pqname[0] != 0) { 1451 if ((newrule->pqid = 1452 pf_qname2qid(newrule->pqname)) == 0) 1453 error = EBUSY; 1454 } else 1455 newrule->pqid = newrule->qid; 1456 } 1457#endif 1458 if (newrule->tagname[0]) 1459 if ((newrule->tag = 1460 pf_tagname2tag(newrule->tagname)) == 0) 1461 error = EBUSY; 1462 if (newrule->match_tagname[0]) 1463 if ((newrule->match_tag = pf_tagname2tag( 1464 newrule->match_tagname)) == 0) 1465 error = EBUSY; 1466 1467 if (newrule->rt && !newrule->direction) 1468 error = EINVAL; 1469 if (pfi_dynaddr_setup(&newrule->src.addr, 
newrule->af)) 1470 error = EINVAL; 1471 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1472 error = EINVAL; 1473 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1474 error = EINVAL; 1475 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1476 error = EINVAL; 1477 1478 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1479 if (((((newrule->action == PF_NAT) || 1480 (newrule->action == PF_RDR) || 1481 (newrule->action == PF_BINAT) || 1482 (newrule->rt > PF_FASTROUTE)) && 1483 !newrule->anchorname[0])) && 1484 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1485 error = EINVAL; 1486 1487 if (error) { 1488 pf_rm_rule(NULL, newrule); 1489 break; 1490 } 1491 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1492 newrule->evaluations = newrule->packets = 0; 1493 newrule->bytes = 0; 1494 } 1495 pf_empty_pool(&pf_pabuf); 1496 1497 s = splsoftnet(); 1498 1499 if (pcr->action == PF_CHANGE_ADD_HEAD) 1500 oldrule = TAILQ_FIRST( 1501 ruleset->rules[rs_num].active.ptr); 1502 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1503 oldrule = TAILQ_LAST( 1504 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1505 else { 1506 oldrule = TAILQ_FIRST( 1507 ruleset->rules[rs_num].active.ptr); 1508 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1509 oldrule = TAILQ_NEXT(oldrule, entries); 1510 if (oldrule == NULL) { 1511 if (newrule != NULL) 1512 pf_rm_rule(NULL, newrule); 1513 error = EINVAL; 1514 splx(s); 1515 break; 1516 } 1517 } 1518 1519 if (pcr->action == PF_CHANGE_REMOVE) 1520 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1521 else { 1522 if (oldrule == NULL) 1523 TAILQ_INSERT_TAIL( 1524 ruleset->rules[rs_num].active.ptr, 1525 newrule, entries); 1526 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1527 pcr->action == PF_CHANGE_ADD_BEFORE) 1528 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1529 else 1530 TAILQ_INSERT_AFTER( 1531 ruleset->rules[rs_num].active.ptr, 1532 oldrule, newrule, entries); 1533 } 1534 1535 nr = 0; 1536 TAILQ_FOREACH(oldrule, 1537 
ruleset->rules[rs_num].active.ptr, entries) 1538 oldrule->nr = nr++; 1539 1540 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1541 pf_remove_if_empty_ruleset(ruleset); 1542 pf_update_anchor_rules(); 1543 1544 ruleset->rules[rs_num].active.ticket++; 1545 splx(s); 1546 break; 1547 } 1548 1549 case DIOCCLRSTATES: { 1550 struct pf_state *state; 1551 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1552 int killed = 0; 1553 1554 s = splsoftnet(); 1555 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1556 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1557 state->u.s.kif->pfik_name)) { 1558 state->timeout = PFTM_PURGE; 1559#if NPFSYNC 1560 /* don't send out individual delete messages */ 1561 state->sync_flags = PFSTATE_NOSYNC; 1562#endif 1563 killed++; 1564 } 1565 } 1566 pf_purge_expired_states(); 1567 pf_status.states = 0; 1568 psk->psk_af = killed; 1569#if NPFSYNC 1570 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1571#endif 1572 splx(s); 1573 break; 1574 } 1575 1576 case DIOCKILLSTATES: { 1577 struct pf_state *state; 1578 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1579 int killed = 0; 1580 1581 s = splsoftnet(); 1582 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1583 if ((!psk->psk_af || state->af == psk->psk_af) 1584 && (!psk->psk_proto || psk->psk_proto == 1585 state->proto) && 1586 PF_MATCHA(psk->psk_src.not, 1587 &psk->psk_src.addr.v.a.addr, 1588 &psk->psk_src.addr.v.a.mask, 1589 &state->lan.addr, state->af) && 1590 PF_MATCHA(psk->psk_dst.not, 1591 &psk->psk_dst.addr.v.a.addr, 1592 &psk->psk_dst.addr.v.a.mask, 1593 &state->ext.addr, state->af) && 1594 (psk->psk_src.port_op == 0 || 1595 pf_match_port(psk->psk_src.port_op, 1596 psk->psk_src.port[0], psk->psk_src.port[1], 1597 state->lan.port)) && 1598 (psk->psk_dst.port_op == 0 || 1599 pf_match_port(psk->psk_dst.port_op, 1600 psk->psk_dst.port[0], psk->psk_dst.port[1], 1601 state->ext.port)) && 1602 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1603 
state->u.s.kif->pfik_name))) { 1604 state->timeout = PFTM_PURGE; 1605 killed++; 1606 } 1607 } 1608 pf_purge_expired_states(); 1609 splx(s); 1610 psk->psk_af = killed; 1611 break; 1612 } 1613 1614 case DIOCADDSTATE: { 1615 struct pfioc_state *ps = (struct pfioc_state *)addr; 1616 struct pf_state *state; 1617 struct pfi_kif *kif; 1618 1619 if (ps->state.timeout >= PFTM_MAX && 1620 ps->state.timeout != PFTM_UNTIL_PACKET) { 1621 error = EINVAL; 1622 break; 1623 } 1624 state = pool_get(&pf_state_pl, PR_NOWAIT); 1625 if (state == NULL) { 1626 error = ENOMEM; 1627 break; 1628 } 1629 s = splsoftnet(); 1630 kif = pfi_lookup_create(ps->state.u.ifname); 1631 if (kif == NULL) { 1632 pool_put(&pf_state_pl, state); 1633 error = ENOENT; 1634 splx(s); 1635 break; 1636 } 1637 bcopy(&ps->state, state, sizeof(struct pf_state)); 1638 bzero(&state->u, sizeof(state->u)); 1639 state->rule.ptr = &pf_default_rule; 1640 state->nat_rule.ptr = NULL; 1641 state->anchor.ptr = NULL; 1642 state->rt_kif = NULL; 1643#ifdef __FreeBSD__ 1644 state->creation = time_second; 1645#else 1646 state->creation = time.tv_sec; 1647#endif 1648 state->pfsync_time = 0; 1649 state->packets[0] = state->packets[1] = 0; 1650 state->bytes[0] = state->bytes[1] = 0; 1651 1652 if (pf_insert_state(kif, state)) { 1653 pfi_maybe_destroy(kif); 1654 pool_put(&pf_state_pl, state); 1655 error = ENOMEM; 1656 } 1657 splx(s); 1658 break; 1659 } 1660 1661 case DIOCGETSTATE: { 1662 struct pfioc_state *ps = (struct pfioc_state *)addr; 1663 struct pf_state *state; 1664 u_int32_t nr; 1665 1666 nr = 0; 1667 s = splsoftnet(); 1668 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1669 if (nr >= ps->nr) 1670 break; 1671 nr++; 1672 } 1673 if (state == NULL) { 1674 error = EBUSY; 1675 splx(s); 1676 break; 1677 } 1678 bcopy(state, &ps->state, sizeof(struct pf_state)); 1679 ps->state.rule.nr = state->rule.ptr->nr; 1680 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 
1681 -1 : state->nat_rule.ptr->nr; 1682 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 1683 -1 : state->anchor.ptr->nr; 1684 splx(s); 1685 ps->state.expire = pf_state_expires(state); 1686#ifdef __FreeBSD__ 1687 if (ps->state.expire > time_second) 1688 ps->state.expire -= time_second; 1689#else 1690 if (ps->state.expire > time.tv_sec) 1691 ps->state.expire -= time.tv_sec; 1692#endif 1693 else 1694 ps->state.expire = 0; 1695 break; 1696 } 1697 1698 case DIOCGETSTATES: { 1699 struct pfioc_states *ps = (struct pfioc_states *)addr; 1700 struct pf_state *state; 1701 struct pf_state *p, pstore; 1702 struct pfi_kif *kif; 1703 u_int32_t nr = 0; 1704 int space = ps->ps_len; 1705 1706 if (space == 0) { 1707 s = splsoftnet(); 1708 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1709 nr += kif->pfik_states; 1710 splx(s); 1711 ps->ps_len = sizeof(struct pf_state) * nr; 1712#ifdef __FreeBSD__ 1713 PF_UNLOCK(); 1714#endif 1715 return (0); 1716 } 1717 1718 s = splsoftnet(); 1719 p = ps->ps_states; 1720 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1721 RB_FOREACH(state, pf_state_tree_ext_gwy, 1722 &kif->pfik_ext_gwy) { 1723#ifdef __FreeBSD__ 1724 int secs = time_second; 1725#else 1726 int secs = time.tv_sec; 1727#endif 1728 1729 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 1730 break; 1731 1732 bcopy(state, &pstore, sizeof(pstore)); 1733 strlcpy(pstore.u.ifname, kif->pfik_name, 1734 sizeof(pstore.u.ifname)); 1735 pstore.rule.nr = state->rule.ptr->nr; 1736 pstore.nat_rule.nr = (state->nat_rule.ptr == 1737 NULL) ? -1 : state->nat_rule.ptr->nr; 1738 pstore.anchor.nr = (state->anchor.ptr == 1739 NULL) ? 
-1 : state->anchor.ptr->nr; 1740 pstore.creation = secs - pstore.creation; 1741 pstore.expire = pf_state_expires(state); 1742 if (pstore.expire > secs) 1743 pstore.expire -= secs; 1744 else 1745 pstore.expire = 0; 1746#ifdef __FreeBSD__ 1747 PF_COPYOUT(&pstore, p, sizeof(*p), error); 1748#else 1749 error = copyout(&pstore, p, sizeof(*p)); 1750#endif 1751 if (error) { 1752 splx(s); 1753 goto fail; 1754 } 1755 p++; 1756 nr++; 1757 } 1758 ps->ps_len = sizeof(struct pf_state) * nr; 1759 splx(s); 1760 break; 1761 } 1762 1763 case DIOCGETSTATUS: { 1764 struct pf_status *s = (struct pf_status *)addr; 1765 bcopy(&pf_status, s, sizeof(struct pf_status)); 1766 pfi_fill_oldstatus(s); 1767 break; 1768 } 1769 1770 case DIOCSETSTATUSIF: { 1771 struct pfioc_if *pi = (struct pfioc_if *)addr; 1772 1773 if (pi->ifname[0] == 0) { 1774 bzero(pf_status.ifname, IFNAMSIZ); 1775 break; 1776 } 1777 if (ifunit(pi->ifname) == NULL) { 1778 error = EINVAL; 1779 break; 1780 } 1781 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 1782 break; 1783 } 1784 1785 case DIOCCLRSTATUS: { 1786 bzero(pf_status.counters, sizeof(pf_status.counters)); 1787 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 1788 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 1789 if (*pf_status.ifname) 1790 pfi_clr_istats(pf_status.ifname, NULL, 1791 PFI_FLAG_INSTANCE); 1792 break; 1793 } 1794 1795 case DIOCNATLOOK: { 1796 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1797 struct pf_state *state; 1798 struct pf_state key; 1799 int m = 0, direction = pnl->direction; 1800 1801 key.af = pnl->af; 1802 key.proto = pnl->proto; 1803 1804 if (!pnl->proto || 1805 PF_AZERO(&pnl->saddr, pnl->af) || 1806 PF_AZERO(&pnl->daddr, pnl->af) || 1807 !pnl->dport || !pnl->sport) 1808 error = EINVAL; 1809 else { 1810 s = splsoftnet(); 1811 1812 /* 1813 * userland gives us source and dest of connection, 1814 * reverse the lookup so we ask for what happens with 1815 * the return traffic, enabling us to find it in the 1816 
* state tree. 1817 */ 1818 if (direction == PF_IN) { 1819 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 1820 key.ext.port = pnl->dport; 1821 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 1822 key.gwy.port = pnl->sport; 1823 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 1824 } else { 1825 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 1826 key.lan.port = pnl->dport; 1827 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 1828 key.ext.port = pnl->sport; 1829 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 1830 } 1831 if (m > 1) 1832 error = E2BIG; /* more than one state */ 1833 else if (state != NULL) { 1834 if (direction == PF_IN) { 1835 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 1836 state->af); 1837 pnl->rsport = state->lan.port; 1838 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 1839 pnl->af); 1840 pnl->rdport = pnl->dport; 1841 } else { 1842 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 1843 state->af); 1844 pnl->rdport = state->gwy.port; 1845 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 1846 pnl->af); 1847 pnl->rsport = pnl->sport; 1848 } 1849 } else 1850 error = ENOENT; 1851 splx(s); 1852 } 1853 break; 1854 } 1855 1856 case DIOCSETTIMEOUT: { 1857 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1858 int old; 1859 1860 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1861 pt->seconds < 0) { 1862 error = EINVAL; 1863 goto fail; 1864 } 1865 old = pf_default_rule.timeout[pt->timeout]; 1866 pf_default_rule.timeout[pt->timeout] = pt->seconds; 1867 pt->seconds = old; 1868 break; 1869 } 1870 1871 case DIOCGETTIMEOUT: { 1872 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1873 1874 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1875 error = EINVAL; 1876 goto fail; 1877 } 1878 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1879 break; 1880 } 1881 1882 case DIOCGETLIMIT: { 1883 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1884 1885 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1886 error = EINVAL; 1887 goto fail; 1888 } 1889 pl->limit = pf_pool_limits[pl->index].limit; 1890 break; 
1891 } 1892 1893 case DIOCSETLIMIT: { 1894 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1895 int old_limit; 1896 1897 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1898 pf_pool_limits[pl->index].pp == NULL) { 1899 error = EINVAL; 1900 goto fail; 1901 } 1902#ifdef __FreeBSD__ 1903 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 1904#else 1905 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 1906 pl->limit, NULL, 0) != 0) { 1907 error = EBUSY; 1908 goto fail; 1909 } 1910#endif 1911 old_limit = pf_pool_limits[pl->index].limit; 1912 pf_pool_limits[pl->index].limit = pl->limit; 1913 pl->limit = old_limit; 1914 break; 1915 } 1916 1917 case DIOCSETDEBUG: { 1918 u_int32_t *level = (u_int32_t *)addr; 1919 1920 pf_status.debug = *level; 1921 break; 1922 } 1923 1924 case DIOCCLRRULECTRS: { 1925 struct pf_ruleset *ruleset = &pf_main_ruleset; 1926 struct pf_rule *rule; 1927 1928 s = splsoftnet(); 1929 TAILQ_FOREACH(rule, 1930 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) 1931 rule->evaluations = rule->packets = 1932 rule->bytes = 0; 1933 splx(s); 1934 break; 1935 } 1936 1937#ifdef __FreeBSD__ 1938 case DIOCGIFSPEED: { 1939 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 1940 struct pf_ifspeed ps; 1941 struct ifnet *ifp; 1942 1943 if (psp->ifname[0] != 0) { 1944 /* Can we completely trust user-land? 
*/ 1945 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 1946 ifp = ifunit(ps.ifname); 1947 if (ifp ) 1948 psp->baudrate = ifp->if_baudrate; 1949 else 1950 error = EINVAL; 1951 } else 1952 error = EINVAL; 1953 break; 1954 } 1955#endif /* __FreeBSD__ */ 1956 1957#ifdef ALTQ 1958 case DIOCSTARTALTQ: { 1959 struct pf_altq *altq; 1960 struct ifnet *ifp; 1961 struct tb_profile tb; 1962 1963 /* enable all altq interfaces on active list */ 1964 s = splsoftnet(); 1965 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1966 if (altq->qname[0] == 0) { 1967 if ((ifp = ifunit(altq->ifname)) == NULL) { 1968 error = EINVAL; 1969 break; 1970 } 1971 if (ifp->if_snd.altq_type != ALTQT_NONE) 1972 error = altq_enable(&ifp->if_snd); 1973 if (error != 0) 1974 break; 1975 /* set tokenbucket regulator */ 1976 tb.rate = altq->ifbandwidth; 1977 tb.depth = altq->tbrsize; 1978 PF_UNLOCK(); 1979 error = tbr_set(&ifp->if_snd, &tb); 1980 PF_LOCK(); 1981 if (error != 0) 1982 break; 1983 } 1984 } 1985#ifndef __FreeBSD__ 1986 if (error == 0) 1987 pfaltq_running = 1; 1988#endif 1989 splx(s); 1990 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 1991 break; 1992 } 1993 1994 case DIOCSTOPALTQ: { 1995 struct pf_altq *altq; 1996 struct ifnet *ifp; 1997 struct tb_profile tb; 1998 int err; 1999 2000 /* disable all altq interfaces on active list */ 2001 s = splsoftnet(); 2002 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2003 if (altq->qname[0] == 0) { 2004 if ((ifp = ifunit(altq->ifname)) == NULL) { 2005 error = EINVAL; 2006 break; 2007 } 2008 if (ifp->if_snd.altq_type != ALTQT_NONE) { 2009 err = altq_disable(&ifp->if_snd); 2010 if (err != 0 && error == 0) 2011 error = err; 2012 } 2013 /* clear tokenbucket regulator */ 2014 tb.rate = 0; 2015 PF_UNLOCK(); 2016 err = tbr_set(&ifp->if_snd, &tb); 2017 PF_LOCK(); 2018 if (err != 0 && error == 0) 2019 error = err; 2020 } 2021 } 2022#ifndef __FreeBSD__ 2023 if (error == 0) 2024 pfaltq_running = 0; 2025#endif 2026 splx(s); 2027 DPFPRINTF(PF_DEBUG_MISC, ("altq: 
stopped\n")); 2028 break; 2029 } 2030 2031 case DIOCBEGINALTQS: { 2032 u_int32_t *ticket = (u_int32_t *)addr; 2033 2034 error = pf_begin_altq(ticket); 2035 break; 2036 } 2037 2038 case DIOCADDALTQ: { 2039 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2040 struct pf_altq *altq, *a; 2041 2042 if (pa->ticket != ticket_altqs_inactive) { 2043 error = EBUSY; 2044 break; 2045 } 2046 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2047 if (altq == NULL) { 2048 error = ENOMEM; 2049 break; 2050 } 2051 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2052 2053 /* 2054 * if this is for a queue, find the discipline and 2055 * copy the necessary fields 2056 */ 2057 if (altq->qname[0] != 0) { 2058 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2059 error = EBUSY; 2060 pool_put(&pf_altq_pl, altq); 2061 break; 2062 } 2063 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2064 if (strncmp(a->ifname, altq->ifname, 2065 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2066 altq->altq_disc = a->altq_disc; 2067 break; 2068 } 2069 } 2070 } 2071 2072#ifdef __FreeBSD__ 2073 PF_UNLOCK(); 2074#endif 2075 error = altq_add(altq); 2076#ifdef __FreeBSD__ 2077 PF_LOCK(); 2078#endif 2079 if (error) { 2080 pool_put(&pf_altq_pl, altq); 2081 break; 2082 } 2083 2084 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2085 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2086 break; 2087 } 2088 2089 case DIOCCOMMITALTQS: { 2090 u_int32_t ticket = *(u_int32_t *)addr; 2091 2092 error = pf_commit_altq(ticket); 2093 break; 2094 } 2095 2096 case DIOCGETALTQS: { 2097 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2098 struct pf_altq *altq; 2099 2100 pa->nr = 0; 2101 s = splsoftnet(); 2102 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2103 pa->nr++; 2104 pa->ticket = ticket_altqs_active; 2105 splx(s); 2106 break; 2107 } 2108 2109 case DIOCGETALTQ: { 2110 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2111 struct pf_altq *altq; 2112 u_int32_t nr; 2113 2114 if (pa->ticket != ticket_altqs_active) { 2115 error = 
EBUSY; 2116 break; 2117 } 2118 nr = 0; 2119 s = splsoftnet(); 2120 altq = TAILQ_FIRST(pf_altqs_active); 2121 while ((altq != NULL) && (nr < pa->nr)) { 2122 altq = TAILQ_NEXT(altq, entries); 2123 nr++; 2124 } 2125 if (altq == NULL) { 2126 error = EBUSY; 2127 splx(s); 2128 break; 2129 } 2130 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2131 splx(s); 2132 break; 2133 } 2134 2135 case DIOCCHANGEALTQ: 2136 /* CHANGEALTQ not supported yet! */ 2137 error = ENODEV; 2138 break; 2139 2140 case DIOCGETQSTATS: { 2141 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2142 struct pf_altq *altq; 2143 u_int32_t nr; 2144 int nbytes; 2145 2146 if (pq->ticket != ticket_altqs_active) { 2147 error = EBUSY; 2148 break; 2149 } 2150 nbytes = pq->nbytes; 2151 nr = 0; 2152 s = splsoftnet(); 2153 altq = TAILQ_FIRST(pf_altqs_active); 2154 while ((altq != NULL) && (nr < pq->nr)) { 2155 altq = TAILQ_NEXT(altq, entries); 2156 nr++; 2157 } 2158 if (altq == NULL) { 2159 error = EBUSY; 2160 splx(s); 2161 break; 2162 } 2163#ifdef __FreeBSD__ 2164 PF_UNLOCK(); 2165#endif 2166 error = altq_getqstats(altq, pq->buf, &nbytes); 2167#ifdef __FreeBSD__ 2168 PF_LOCK(); 2169#endif 2170 splx(s); 2171 if (error == 0) { 2172 pq->scheduler = altq->scheduler; 2173 pq->nbytes = nbytes; 2174 } 2175 break; 2176 } 2177#endif /* ALTQ */ 2178 2179 case DIOCBEGINADDRS: { 2180 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2181 2182 pf_empty_pool(&pf_pabuf); 2183 pp->ticket = ++ticket_pabuf; 2184 break; 2185 } 2186 2187 case DIOCADDADDR: { 2188 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2189 2190#ifndef INET 2191 if (pp->af == AF_INET) { 2192 error = EAFNOSUPPORT; 2193 break; 2194 } 2195#endif /* INET */ 2196#ifndef INET6 2197 if (pp->af == AF_INET6) { 2198 error = EAFNOSUPPORT; 2199 break; 2200 } 2201#endif /* INET6 */ 2202 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2203 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2204 pp->addr.addr.type != PF_ADDR_TABLE) { 2205 error = EINVAL; 2206 
break; 2207 } 2208 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2209 if (pa == NULL) { 2210 error = ENOMEM; 2211 break; 2212 } 2213 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2214 if (pa->ifname[0]) { 2215 pa->kif = pfi_attach_rule(pa->ifname); 2216 if (pa->kif == NULL) { 2217 pool_put(&pf_pooladdr_pl, pa); 2218 error = EINVAL; 2219 break; 2220 } 2221 } 2222 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2223 pfi_dynaddr_remove(&pa->addr); 2224 pfi_detach_rule(pa->kif); 2225 pool_put(&pf_pooladdr_pl, pa); 2226 error = EINVAL; 2227 break; 2228 } 2229 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2230 break; 2231 } 2232 2233 case DIOCGETADDRS: { 2234 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2235 2236 pp->nr = 0; 2237 s = splsoftnet(); 2238 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket, 2239 pp->r_action, pp->r_num, 0, 1, 0); 2240 if (pool == NULL) { 2241 error = EBUSY; 2242 splx(s); 2243 break; 2244 } 2245 TAILQ_FOREACH(pa, &pool->list, entries) 2246 pp->nr++; 2247 splx(s); 2248 break; 2249 } 2250 2251 case DIOCGETADDR: { 2252 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2253 u_int32_t nr = 0; 2254 2255 s = splsoftnet(); 2256 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket, 2257 pp->r_action, pp->r_num, 0, 1, 1); 2258 if (pool == NULL) { 2259 error = EBUSY; 2260 splx(s); 2261 break; 2262 } 2263 pa = TAILQ_FIRST(&pool->list); 2264 while ((pa != NULL) && (nr < pp->nr)) { 2265 pa = TAILQ_NEXT(pa, entries); 2266 nr++; 2267 } 2268 if (pa == NULL) { 2269 error = EBUSY; 2270 splx(s); 2271 break; 2272 } 2273 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2274 pfi_dynaddr_copyout(&pp->addr.addr); 2275 pf_tbladdr_copyout(&pp->addr.addr); 2276 splx(s); 2277 break; 2278 } 2279 2280 case DIOCCHANGEADDR: { 2281 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2282 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2283 struct pf_ruleset *ruleset; 2284 2285 if (pca->action < PF_CHANGE_ADD_HEAD || 2286 pca->action > 
PF_CHANGE_REMOVE) { 2287 error = EINVAL; 2288 break; 2289 } 2290 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2291 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2292 pca->addr.addr.type != PF_ADDR_TABLE) { 2293 error = EINVAL; 2294 break; 2295 } 2296 2297 ruleset = pf_find_ruleset(pca->anchor, pca->ruleset); 2298 if (ruleset == NULL) { 2299 error = EBUSY; 2300 break; 2301 } 2302 pool = pf_get_pool(pca->anchor, pca->ruleset, pca->ticket, 2303 pca->r_action, pca->r_num, pca->r_last, 1, 1); 2304 if (pool == NULL) { 2305 error = EBUSY; 2306 break; 2307 } 2308 if (pca->action != PF_CHANGE_REMOVE) { 2309 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2310 if (newpa == NULL) { 2311 error = ENOMEM; 2312 break; 2313 } 2314 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2315#ifndef INET 2316 if (pca->af == AF_INET) { 2317 pool_put(&pf_pooladdr_pl, newpa); 2318 error = EAFNOSUPPORT; 2319 break; 2320 } 2321#endif /* INET */ 2322#ifndef INET6 2323 if (pca->af == AF_INET6) { 2324 pool_put(&pf_pooladdr_pl, newpa); 2325 error = EAFNOSUPPORT; 2326 break; 2327 } 2328#endif /* INET6 */ 2329 if (newpa->ifname[0]) { 2330 newpa->kif = pfi_attach_rule(newpa->ifname); 2331 if (newpa->kif == NULL) { 2332 pool_put(&pf_pooladdr_pl, newpa); 2333 error = EINVAL; 2334 break; 2335 } 2336 } else 2337 newpa->kif = NULL; 2338 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2339 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2340 pfi_dynaddr_remove(&newpa->addr); 2341 pfi_detach_rule(newpa->kif); 2342 pool_put(&pf_pooladdr_pl, newpa); 2343 error = EINVAL; 2344 break; 2345 } 2346 } 2347 2348 s = splsoftnet(); 2349 2350 if (pca->action == PF_CHANGE_ADD_HEAD) 2351 oldpa = TAILQ_FIRST(&pool->list); 2352 else if (pca->action == PF_CHANGE_ADD_TAIL) 2353 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2354 else { 2355 int i = 0; 2356 2357 oldpa = TAILQ_FIRST(&pool->list); 2358 while ((oldpa != NULL) && (i < pca->nr)) { 2359 oldpa = TAILQ_NEXT(oldpa, entries); 2360 i++; 2361 } 2362 if (oldpa == NULL) { 2363 
error = EINVAL; 2364 splx(s); 2365 break; 2366 } 2367 } 2368 2369 if (pca->action == PF_CHANGE_REMOVE) { 2370 TAILQ_REMOVE(&pool->list, oldpa, entries); 2371 pfi_dynaddr_remove(&oldpa->addr); 2372 pf_tbladdr_remove(&oldpa->addr); 2373 pfi_detach_rule(oldpa->kif); 2374 pool_put(&pf_pooladdr_pl, oldpa); 2375 } else { 2376 if (oldpa == NULL) 2377 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2378 else if (pca->action == PF_CHANGE_ADD_HEAD || 2379 pca->action == PF_CHANGE_ADD_BEFORE) 2380 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2381 else 2382 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2383 newpa, entries); 2384 } 2385 2386 pool->cur = TAILQ_FIRST(&pool->list); 2387 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2388 pca->af); 2389 splx(s); 2390 break; 2391 } 2392 2393 case DIOCGETANCHORS: { 2394 struct pfioc_anchor *pa = (struct pfioc_anchor *)addr; 2395 struct pf_anchor *anchor; 2396 2397 pa->nr = 0; 2398 TAILQ_FOREACH(anchor, &pf_anchors, entries) 2399 pa->nr++; 2400 break; 2401 } 2402 2403 case DIOCGETANCHOR: { 2404 struct pfioc_anchor *pa = (struct pfioc_anchor *)addr; 2405 struct pf_anchor *anchor; 2406 u_int32_t nr = 0; 2407 2408 anchor = TAILQ_FIRST(&pf_anchors); 2409 while (anchor != NULL && nr < pa->nr) { 2410 anchor = TAILQ_NEXT(anchor, entries); 2411 nr++; 2412 } 2413 if (anchor == NULL) 2414 error = EBUSY; 2415 else 2416 bcopy(anchor->name, pa->name, sizeof(pa->name)); 2417 break; 2418 } 2419 2420 case DIOCGETRULESETS: { 2421 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2422 struct pf_anchor *anchor; 2423 struct pf_ruleset *ruleset; 2424 2425 pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0; 2426 if ((anchor = pf_find_anchor(pr->anchor)) == NULL) { 2427 error = EINVAL; 2428 break; 2429 } 2430 pr->nr = 0; 2431 TAILQ_FOREACH(ruleset, &anchor->rulesets, entries) 2432 pr->nr++; 2433 break; 2434 } 2435 2436 case DIOCGETRULESET: { 2437 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2438 struct pf_anchor *anchor; 2439 struct pf_ruleset *ruleset; 
2440 u_int32_t nr = 0; 2441 2442 if ((anchor = pf_find_anchor(pr->anchor)) == NULL) { 2443 error = EINVAL; 2444 break; 2445 } 2446 ruleset = TAILQ_FIRST(&anchor->rulesets); 2447 while (ruleset != NULL && nr < pr->nr) { 2448 ruleset = TAILQ_NEXT(ruleset, entries); 2449 nr++; 2450 } 2451 if (ruleset == NULL) 2452 error = EBUSY; 2453 else 2454 bcopy(ruleset->name, pr->name, sizeof(pr->name)); 2455 break; 2456 } 2457 2458 case DIOCRCLRTABLES: { 2459 struct pfioc_table *io = (struct pfioc_table *)addr; 2460 2461 if (io->pfrio_esize != 0) { 2462 error = ENODEV; 2463 break; 2464 } 2465 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2466 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2467 break; 2468 } 2469 2470 case DIOCRADDTABLES: { 2471 struct pfioc_table *io = (struct pfioc_table *)addr; 2472 2473 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2474 error = ENODEV; 2475 break; 2476 } 2477 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2478 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2479 break; 2480 } 2481 2482 case DIOCRDELTABLES: { 2483 struct pfioc_table *io = (struct pfioc_table *)addr; 2484 2485 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2486 error = ENODEV; 2487 break; 2488 } 2489 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2490 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2491 break; 2492 } 2493 2494 case DIOCRGETTABLES: { 2495 struct pfioc_table *io = (struct pfioc_table *)addr; 2496 2497 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2498 error = ENODEV; 2499 break; 2500 } 2501 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2502 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2503 break; 2504 } 2505 2506 case DIOCRGETTSTATS: { 2507 struct pfioc_table *io = (struct pfioc_table *)addr; 2508 2509 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2510 error = ENODEV; 2511 break; 2512 } 2513 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2514 &io->pfrio_size, 
io->pfrio_flags | PFR_FLAG_USERIOCTL); 2515 break; 2516 } 2517 2518 case DIOCRCLRTSTATS: { 2519 struct pfioc_table *io = (struct pfioc_table *)addr; 2520 2521 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2522 error = ENODEV; 2523 break; 2524 } 2525 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2526 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2527 break; 2528 } 2529 2530 case DIOCRSETTFLAGS: { 2531 struct pfioc_table *io = (struct pfioc_table *)addr; 2532 2533 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2534 error = ENODEV; 2535 break; 2536 } 2537 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2538 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2539 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2540 break; 2541 } 2542 2543 case DIOCRCLRADDRS: { 2544 struct pfioc_table *io = (struct pfioc_table *)addr; 2545 2546 if (io->pfrio_esize != 0) { 2547 error = ENODEV; 2548 break; 2549 } 2550 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2551 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2552 break; 2553 } 2554 2555 case DIOCRADDADDRS: { 2556 struct pfioc_table *io = (struct pfioc_table *)addr; 2557 2558 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2559 error = ENODEV; 2560 break; 2561 } 2562 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2563 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2564 PFR_FLAG_USERIOCTL); 2565 break; 2566 } 2567 2568 case DIOCRDELADDRS: { 2569 struct pfioc_table *io = (struct pfioc_table *)addr; 2570 2571 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2572 error = ENODEV; 2573 break; 2574 } 2575 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2576 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2577 PFR_FLAG_USERIOCTL); 2578 break; 2579 } 2580 2581 case DIOCRSETADDRS: { 2582 struct pfioc_table *io = (struct pfioc_table *)addr; 2583 2584 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2585 error = ENODEV; 2586 break; 2587 } 2588 error = 
pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2589 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2590 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2591 PFR_FLAG_USERIOCTL); 2592 break; 2593 } 2594 2595 case DIOCRGETADDRS: { 2596 struct pfioc_table *io = (struct pfioc_table *)addr; 2597 2598 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2599 error = ENODEV; 2600 break; 2601 } 2602 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2603 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2604 break; 2605 } 2606 2607 case DIOCRGETASTATS: { 2608 struct pfioc_table *io = (struct pfioc_table *)addr; 2609 2610 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2611 error = ENODEV; 2612 break; 2613 } 2614 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2615 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2616 break; 2617 } 2618 2619 case DIOCRCLRASTATS: { 2620 struct pfioc_table *io = (struct pfioc_table *)addr; 2621 2622 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2623 error = ENODEV; 2624 break; 2625 } 2626 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2627 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2628 PFR_FLAG_USERIOCTL); 2629 break; 2630 } 2631 2632 case DIOCRTSTADDRS: { 2633 struct pfioc_table *io = (struct pfioc_table *)addr; 2634 2635 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2636 error = ENODEV; 2637 break; 2638 } 2639 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2640 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2641 PFR_FLAG_USERIOCTL); 2642 break; 2643 } 2644 2645 case DIOCRINABEGIN: { 2646 struct pfioc_table *io = (struct pfioc_table *)addr; 2647 2648 if (io->pfrio_esize != 0) { 2649 error = ENODEV; 2650 break; 2651 } 2652 error = pfr_ina_begin(&io->pfrio_table, &io->pfrio_ticket, 2653 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2654 break; 2655 } 2656 2657 case DIOCRINACOMMIT: { 2658 struct pfioc_table *io = (struct pfioc_table *)addr; 
2659 2660 if (io->pfrio_esize != 0) { 2661 error = ENODEV; 2662 break; 2663 } 2664 error = pfr_ina_commit(&io->pfrio_table, io->pfrio_ticket, 2665 &io->pfrio_nadd, &io->pfrio_nchange, io->pfrio_flags | 2666 PFR_FLAG_USERIOCTL); 2667 break; 2668 } 2669 2670 case DIOCRINADEFINE: { 2671 struct pfioc_table *io = (struct pfioc_table *)addr; 2672 2673 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2674 error = ENODEV; 2675 break; 2676 } 2677 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2678 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2679 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2680 break; 2681 } 2682 2683 case DIOCOSFPADD: { 2684 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2685 s = splsoftnet(); 2686 error = pf_osfp_add(io); 2687 splx(s); 2688 break; 2689 } 2690 2691 case DIOCOSFPGET: { 2692 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2693 s = splsoftnet(); 2694 error = pf_osfp_get(io); 2695 splx(s); 2696 break; 2697 } 2698 2699 case DIOCXBEGIN: { 2700 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2701 struct pfioc_trans_e ioe; 2702 struct pfr_table table; 2703 int i; 2704 2705 if (io->esize != sizeof(ioe)) { 2706 error = ENODEV; 2707 goto fail; 2708 } 2709 for (i = 0; i < io->size; i++) { 2710#ifdef __FreeBSD__ 2711 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2712 if (error) { 2713#else 2714 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2715#endif 2716 error = EFAULT; 2717 goto fail; 2718 } 2719 switch (ioe.rs_num) { 2720#ifdef ALTQ 2721 case PF_RULESET_ALTQ: 2722 if (ioe.anchor[0] || ioe.ruleset[0]) { 2723 error = EINVAL; 2724 goto fail; 2725 } 2726 if ((error = pf_begin_altq(&ioe.ticket))) 2727 goto fail; 2728 break; 2729#endif /* ALTQ */ 2730 case PF_RULESET_TABLE: 2731 bzero(&table, sizeof(table)); 2732 strlcpy(table.pfrt_anchor, ioe.anchor, 2733 sizeof(table.pfrt_anchor)); 2734 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2735 sizeof(table.pfrt_ruleset)); 2736 if ((error = 
pfr_ina_begin(&table, 2737 &ioe.ticket, NULL, 0))) 2738 goto fail; 2739 break; 2740 default: 2741 if ((error = pf_begin_rules(&ioe.ticket, 2742 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2743 goto fail; 2744 break; 2745 } 2746#ifdef __FreeBSD__ 2747 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]), 2748 error); 2749 if (error) { 2750#else 2751 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2752#endif 2753 error = EFAULT; 2754 goto fail; 2755 } 2756 } 2757 break; 2758 } 2759 2760 case DIOCXROLLBACK: { 2761 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2762 struct pfioc_trans_e ioe; 2763 struct pfr_table table; 2764 int i; 2765 2766 if (io->esize != sizeof(ioe)) { 2767 error = ENODEV; 2768 goto fail; 2769 } 2770 for (i = 0; i < io->size; i++) { 2771#ifdef __FreeBSD__ 2772 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2773 if (error) { 2774#else 2775 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2776#endif 2777 error = EFAULT; 2778 goto fail; 2779 } 2780 switch (ioe.rs_num) { 2781#ifdef ALTQ 2782 case PF_RULESET_ALTQ: 2783 if (ioe.anchor[0] || ioe.ruleset[0]) { 2784 error = EINVAL; 2785 goto fail; 2786 } 2787 if ((error = pf_rollback_altq(ioe.ticket))) 2788 goto fail; /* really bad */ 2789 break; 2790#endif /* ALTQ */ 2791 case PF_RULESET_TABLE: 2792 bzero(&table, sizeof(table)); 2793 strlcpy(table.pfrt_anchor, ioe.anchor, 2794 sizeof(table.pfrt_anchor)); 2795 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2796 sizeof(table.pfrt_ruleset)); 2797 if ((error = pfr_ina_rollback(&table, 2798 ioe.ticket, NULL, 0))) 2799 goto fail; /* really bad */ 2800 break; 2801 default: 2802 if ((error = pf_rollback_rules(ioe.ticket, 2803 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2804 goto fail; /* really bad */ 2805 break; 2806 } 2807 } 2808 break; 2809 } 2810 2811 case DIOCXCOMMIT: { 2812 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2813 struct pfioc_trans_e ioe; 2814 struct pfr_table table; 2815 struct pf_ruleset *rs; 2816 int i; 2817 2818 if (io->esize != 
sizeof(ioe)) { 2819 error = ENODEV; 2820 goto fail; 2821 } 2822 /* first makes sure everything will succeed */ 2823 for (i = 0; i < io->size; i++) { 2824#ifdef __FreeBSD__ 2825 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2826 if (error) { 2827#else 2828 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2829#endif 2830 error = EFAULT; 2831 goto fail; 2832 } 2833 switch (ioe.rs_num) { 2834#ifdef ALTQ 2835 case PF_RULESET_ALTQ: 2836 if (ioe.anchor[0] || ioe.ruleset[0]) { 2837 error = EINVAL; 2838 goto fail; 2839 } 2840 if (!altqs_inactive_open || ioe.ticket != 2841 ticket_altqs_inactive) { 2842 error = EBUSY; 2843 goto fail; 2844 } 2845 break; 2846#endif /* ALTQ */ 2847 case PF_RULESET_TABLE: 2848 rs = pf_find_ruleset(ioe.anchor, ioe.ruleset); 2849 if (rs == NULL || !rs->topen || ioe.ticket != 2850 rs->tticket) { 2851 error = EBUSY; 2852 goto fail; 2853 } 2854 break; 2855 default: 2856 if (ioe.rs_num < 0 || ioe.rs_num >= 2857 PF_RULESET_MAX) { 2858 error = EINVAL; 2859 goto fail; 2860 } 2861 rs = pf_find_ruleset(ioe.anchor, ioe.ruleset); 2862 if (rs == NULL || 2863 !rs->rules[ioe.rs_num].inactive.open || 2864 rs->rules[ioe.rs_num].inactive.ticket != 2865 ioe.ticket) { 2866 error = EBUSY; 2867 goto fail; 2868 } 2869 break; 2870 } 2871 } 2872 /* now do the commit - no errors should happen here */ 2873 for (i = 0; i < io->size; i++) { 2874#ifdef __FreeBSD__ 2875 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2876 if (error) { 2877#else 2878 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2879#endif 2880 error = EFAULT; 2881 goto fail; 2882 } 2883 switch (ioe.rs_num) { 2884#ifdef ALTQ 2885 case PF_RULESET_ALTQ: 2886 if ((error = pf_commit_altq(ioe.ticket))) 2887 goto fail; /* really bad */ 2888 break; 2889#endif /* ALTQ */ 2890 case PF_RULESET_TABLE: 2891 bzero(&table, sizeof(table)); 2892 strlcpy(table.pfrt_anchor, ioe.anchor, 2893 sizeof(table.pfrt_anchor)); 2894 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2895 sizeof(table.pfrt_ruleset)); 2896 if ((error = 
pfr_ina_commit(&table, ioe.ticket, 2897 NULL, NULL, 0))) 2898 goto fail; /* really bad */ 2899 break; 2900 default: 2901 if ((error = pf_commit_rules(ioe.ticket, 2902 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2903 goto fail; /* really bad */ 2904 break; 2905 } 2906 } 2907 break; 2908 } 2909 2910 case DIOCGETSRCNODES: { 2911 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2912 struct pf_src_node *n; 2913 struct pf_src_node *p, pstore; 2914 u_int32_t nr = 0; 2915 int space = psn->psn_len; 2916 2917 if (space == 0) { 2918 s = splsoftnet(); 2919 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2920 nr++; 2921 splx(s); 2922 psn->psn_len = sizeof(struct pf_src_node) * nr; 2923#ifdef __FreeBSD__ 2924 PF_UNLOCK(); 2925#endif 2926 return (0); 2927 } 2928 2929 s = splsoftnet(); 2930 p = psn->psn_src_nodes; 2931 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2932#ifdef __FreeBSD__ 2933 int secs = time_second; 2934#else 2935 int secs = time.tv_sec; 2936#endif 2937 2938 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 2939 break; 2940 2941 bcopy(n, &pstore, sizeof(pstore)); 2942 if (n->rule.ptr != NULL) 2943 pstore.rule.nr = n->rule.ptr->nr; 2944 pstore.creation = secs - pstore.creation; 2945 if (pstore.expire > secs) 2946 pstore.expire -= secs; 2947 else 2948 pstore.expire = 0; 2949#ifdef __FreeBSD__ 2950 PF_COPYOUT(&pstore, p, sizeof(*p), error); 2951#else 2952 error = copyout(&pstore, p, sizeof(*p)); 2953#endif 2954 if (error) { 2955 splx(s); 2956 goto fail; 2957 } 2958 p++; 2959 nr++; 2960 } 2961 psn->psn_len = sizeof(struct pf_src_node) * nr; 2962 splx(s); 2963 break; 2964 } 2965 2966 case DIOCCLRSRCNODES: { 2967 struct pf_src_node *n; 2968 struct pf_state *state; 2969 2970 s = splsoftnet(); 2971 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2972 state->src_node = NULL; 2973 state->nat_src_node = NULL; 2974 } 2975 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2976 n->expire = 1; 2977 n->states = 0; 2978 } 2979 pf_purge_expired_src_nodes(); 2980 
		pf_status.src_nodes = 0;
		splx(s);
		break;
	}

	case DIOCSETHOSTID: {
		/* Set the hostid published in pf_status; zero is rejected. */
		u_int32_t	*hostid = (u_int32_t *)addr;

		if (*hostid == 0) {
			error = EINVAL;
			goto fail;
		}
		pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		/* Flush the passive OS fingerprint table. */
		s = splsoftnet();
		pf_osfp_flush();
		splx(s);
		break;

	case DIOCIGETIFACES: {
		/* Export the interface list; caller's element size must match. */
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_if)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size, io->pfiio_flags);
		break;
	}

	case DIOCICLRISTATS: {
		/* Zero per-interface statistics. */
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
		    io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	return (error);
}

#ifdef __FreeBSD__
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Mark every state entry for immediate purge (PFTM_PURGE) and expire
 * them all; used from shutdown_pf() on module unload.
 */
static void
pf_clear_states(void)
{
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
	pf_status.states = 0;
#if 0 /* NPFSYNC */
	/*
	 * XXX This is called on module unload, we do not want to sync
	 * that over?
	 * NOTE(review): 'psk' is not in scope in this function; this
	 * disabled code would not compile if enabled -- fix before use.
	 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Flush tables using a zeroed pfioc_table request (no anchor/ruleset
 * filter, no flags); returns whatever pfr_clr_tables() reports.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Detach every state from its source node(s), then force all source
 * nodes to expire and purge them.
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		/*
		 * expire = 1 presumably marks the node as already expired
		 * so the purge below collects it -- confirm against
		 * pf_purge_expired_src_nodes().
		 */
		n->expire = 1;
		n->states = 0;
	}
	pf_purge_expired_src_nodes();
	pf_status.src_nodes = 0;
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];		/* transaction tickets, one per ruleset type */
	char nn = '\0';		/* empty anchor/ruleset name */

	callout_stop(&pf_expire_to);

	pf_status.running = 0;
	do {
		/*
		 * Open an (empty) inactive ruleset for each ruleset type;
		 * the commits further down then replace the active rules
		 * with nothing.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn,
		    &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn,
		    &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback?
			 */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* Same begin-then-commit-empty trick for ALTQ. */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for AF_INET: run pf_test(PF_IN) on the packet,
 * converting ip_len/ip_off to network byte order around the call
 * (see XXX note below).  A non-zero verdict frees the mbuf.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in the network stack.  OpenBSD's network stack used to convert
	 * ip_len/ip_off to host byte order first, like FreeBSD.
	 * This is not true anymore, so we have to convert back to network
	 * byte order before handing the packet to pf.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m);
	if (chk && *m) {
		/* non-zero verdict from pf_test: drop the packet */
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET: like pf_check_in() but runs
 * pf_test(PF_OUT) and finalizes any delayed data checksum first.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in the network stack.  OpenBSD's network stack used to convert
	 * ip_len/ip_off to host byte order first, like FreeBSD.
	 * This is not true anymore, so we have to convert back to network
	 * byte order before handing the packet to pf.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper checksum before we start (cf. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m);
	if (chk && *m) {
		/* non-zero verdict from pf_test: drop the packet */
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for AF_INET6.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes,
	 * so no conversion is needed here.
	 */
	int chk;

	chk = pf_test6(PF_IN, ifp, m);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET6: like pf_check6_in() but finalizes
 * any delayed data checksum first.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper checksum before we start (cf. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Install the pf pfil(9) input/output hooks for AF_INET (and AF_INET6
 * when compiled in).  Idempotent via the pf_pfil_hooked flag; must be
 * called without the pf lock held.
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* roll back the already-installed IPv4 hooks */
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Remove the pf pfil(9) hooks installed by hook_pf(); no-op when not
 * currently hooked.  Must be called without the pf lock held.
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: initialize zones and the pf mutex, create /dev/pf
 * and run pfattach().  Tears down again and returns ENOMEM when
 * pfattach() fails.
 */
static int
pf_load(void)
{
	init_zone_var();
	init_pf_mutex();
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pfattach() < 0) {
		destroy_dev(pf_dev);
		destroy_pf_mutex();
		return (ENOMEM);
	}
	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, unhook from pfil(9), then tear down
 * rules/tables/states/source nodes/fingerprints and destroy /dev/pf.
 * Unhooking must happen before shutdown so no packets race the
 * teardown.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	pf_status.running = 0;
	PF_UNLOCK();
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 *
		 * NOTE(review): "unregisteration" in the message below is
		 * misspelled (runtime string left unchanged here).
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_dev(pf_dev);
	destroy_pf_mutex();
	return error;
}

/*
 * module(9) event handler for the pf kernel module.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */