pf_ioctl.c revision 158486
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 158486 2006-05-12 16:15:34Z mlaier $ */ 2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */ 3 4/* 5 * Copyright (c) 2001 Daniel Hartmeier 6 * Copyright (c) 2002,2003 Henning Brauer 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
36 * 37 */ 38 39#ifdef __FreeBSD__ 40#include "opt_inet.h" 41#include "opt_inet6.h" 42#endif 43 44#ifdef __FreeBSD__ 45#include "opt_bpf.h" 46#include "opt_pf.h" 47 48#ifdef DEV_BPF 49#define NBPFILTER DEV_BPF 50#else 51#define NBPFILTER 0 52#endif 53 54#ifdef DEV_PFLOG 55#define NPFLOG DEV_PFLOG 56#else 57#define NPFLOG 0 58#endif 59 60#ifdef DEV_PFSYNC 61#define NPFSYNC DEV_PFSYNC 62#else 63#define NPFSYNC 0 64#endif 65 66#else 67#include "bpfilter.h" 68#include "pflog.h" 69#include "pfsync.h" 70#endif 71 72#include <sys/param.h> 73#include <sys/systm.h> 74#include <sys/mbuf.h> 75#include <sys/filio.h> 76#include <sys/fcntl.h> 77#include <sys/socket.h> 78#include <sys/socketvar.h> 79#include <sys/kernel.h> 80#include <sys/time.h> 81#include <sys/malloc.h> 82#ifdef __FreeBSD__ 83#include <sys/module.h> 84#include <sys/conf.h> 85#include <sys/proc.h> 86#else 87#include <sys/timeout.h> 88#include <sys/pool.h> 89#endif 90 91#include <net/if.h> 92#include <net/if_types.h> 93#include <net/route.h> 94 95#include <netinet/in.h> 96#include <netinet/in_var.h> 97#include <netinet/in_systm.h> 98#include <netinet/ip.h> 99#include <netinet/ip_var.h> 100#include <netinet/ip_icmp.h> 101 102#ifndef __FreeBSD__ 103#include <dev/rndvar.h> 104#endif 105#include <net/pfvar.h> 106 107#if NPFSYNC > 0 108#include <net/if_pfsync.h> 109#endif /* NPFSYNC > 0 */ 110 111#ifdef __FreeBSD__ 112#include <net/if_pflog.h> 113#endif 114 115#ifdef INET6 116#include <netinet/ip6.h> 117#include <netinet/in_pcb.h> 118#endif /* INET6 */ 119 120#ifdef ALTQ 121#include <altq/altq.h> 122#endif 123 124#ifdef __FreeBSD__ 125#include <sys/limits.h> 126#include <sys/lock.h> 127#include <sys/mutex.h> 128#include <net/pfil.h> 129#endif /* __FreeBSD__ */ 130 131#ifdef __FreeBSD__ 132void init_zone_var(void); 133void cleanup_pf_zone(void); 134int pfattach(void); 135#else 136void pfattach(int); 137int pfopen(dev_t, int, int, struct proc *); 138int pfclose(dev_t, int, int, struct proc *); 139#endif 140struct 
pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 141 u_int8_t, u_int8_t, u_int8_t); 142int pf_get_ruleset_number(u_int8_t); 143void pf_init_ruleset(struct pf_ruleset *); 144int pf_anchor_setup(struct pf_rule *, 145 const struct pf_ruleset *, const char *); 146int pf_anchor_copyout(const struct pf_ruleset *, 147 const struct pf_rule *, struct pfioc_rule *); 148void pf_anchor_remove(struct pf_rule *); 149 150void pf_mv_pool(struct pf_palist *, struct pf_palist *); 151void pf_empty_pool(struct pf_palist *); 152#ifdef __FreeBSD__ 153int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 154#else 155int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 156#endif 157#ifdef ALTQ 158int pf_begin_altq(u_int32_t *); 159int pf_rollback_altq(u_int32_t); 160int pf_commit_altq(u_int32_t); 161int pf_enable_altq(struct pf_altq *); 162int pf_disable_altq(struct pf_altq *); 163#endif /* ALTQ */ 164int pf_begin_rules(u_int32_t *, int, const char *); 165int pf_rollback_rules(u_int32_t, int, char *); 166int pf_commit_rules(u_int32_t, int, char *); 167 168#ifdef __FreeBSD__ 169extern struct callout pf_expire_to; 170#else 171extern struct timeout pf_expire_to; 172#endif 173 174struct pf_rule pf_default_rule; 175#ifdef ALTQ 176static int pf_altq_running; 177#endif 178 179#define TAGID_MAX 50000 180TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 181 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 182 183#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 184#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 185#endif 186static u_int16_t tagname2tag(struct pf_tags *, char *); 187static void tag2tagname(struct pf_tags *, u_int16_t, char *); 188static void tag_unref(struct pf_tags *, u_int16_t); 189int pf_rtlabel_add(struct pf_addr_wrap *); 190void pf_rtlabel_remove(struct pf_addr_wrap *); 191void pf_rtlabel_copyout(struct pf_addr_wrap *); 192 193#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 194 195 196#ifdef __FreeBSD__ 
197static struct cdev *pf_dev; 198 199/* 200 * XXX - These are new and need to be checked when moveing to a new version 201 */ 202static void pf_clear_states(void); 203static int pf_clear_tables(void); 204static void pf_clear_srcnodes(void); 205/* 206 * XXX - These are new and need to be checked when moveing to a new version 207 */ 208 209/* 210 * Wrapper functions for pfil(9) hooks 211 */ 212static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 213 int dir, struct inpcb *inp); 214static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 215 int dir, struct inpcb *inp); 216#ifdef INET6 217static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 218 int dir, struct inpcb *inp); 219static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, 220 int dir, struct inpcb *inp); 221#endif 222 223static int hook_pf(void); 224static int dehook_pf(void); 225static int shutdown_pf(void); 226static int pf_load(void); 227static int pf_unload(void); 228 229static struct cdevsw pf_cdevsw = { 230 .d_ioctl = pfioctl, 231 .d_name = PF_NAME, 232 .d_version = D_VERSION, 233}; 234 235static volatile int pf_pfil_hooked = 0; 236struct mtx pf_task_mtx; 237pflog_packet_t *pflog_packet_ptr = NULL; 238 239void 240init_pf_mutex(void) 241{ 242 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF); 243} 244 245void 246destroy_pf_mutex(void) 247{ 248 mtx_destroy(&pf_task_mtx); 249} 250 251void 252init_zone_var(void) 253{ 254 pf_src_tree_pl = pf_rule_pl = NULL; 255 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL; 256 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL; 257 pf_state_scrub_pl = NULL; 258 pfr_ktable_pl = pfr_kentry_pl = NULL; 259} 260 261void 262cleanup_pf_zone(void) 263{ 264 UMA_DESTROY(pf_src_tree_pl); 265 UMA_DESTROY(pf_rule_pl); 266 UMA_DESTROY(pf_state_pl); 267 UMA_DESTROY(pf_altq_pl); 268 UMA_DESTROY(pf_pooladdr_pl); 269 UMA_DESTROY(pf_frent_pl); 270 UMA_DESTROY(pf_frag_pl); 271 UMA_DESTROY(pf_cache_pl); 272 
UMA_DESTROY(pf_cent_pl); 273 UMA_DESTROY(pfr_ktable_pl); 274 UMA_DESTROY(pfr_kentry_pl); 275 UMA_DESTROY(pf_state_scrub_pl); 276 UMA_DESTROY(pfi_addr_pl); 277} 278 279int 280pfattach(void) 281{ 282 u_int32_t *my_timeout = pf_default_rule.timeout; 283 int error = 1; 284 285 do { 286 UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl"); 287 UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl"); 288 UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl"); 289 UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl"); 290 UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl"); 291 UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable"); 292 UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); 293 UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2"); 294 UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent"); 295 UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag"); 296 UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache"); 297 UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent"); 298 UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub, 299 "pfstatescrub"); 300 UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); 301 error = 0; 302 } while(0); 303 if (error) { 304 cleanup_pf_zone(); 305 return (error); 306 } 307 pfr_initialize(); 308 pfi_initialize(); 309 if ( (error = pf_osfp_initialize()) ) { 310 cleanup_pf_zone(); 311 pf_osfp_cleanup(); 312 return (error); 313 } 314 315 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl; 316 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 317 pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl; 318 pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 319 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl; 320 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 321 uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp, 322 pf_pool_limits[PF_LIMIT_STATES].limit); 323 324 RB_INIT(&tree_src_tracking); 325 RB_INIT(&pf_anchors); 326 pf_init_ruleset(&pf_main_ruleset); 327 TAILQ_INIT(&pf_altqs[0]); 328 
TAILQ_INIT(&pf_altqs[1]); 329 TAILQ_INIT(&pf_pabuf); 330 pf_altqs_active = &pf_altqs[0]; 331 pf_altqs_inactive = &pf_altqs[1]; 332 TAILQ_INIT(&state_updates); 333 334 /* default rule should never be garbage collected */ 335 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 336 pf_default_rule.action = PF_PASS; 337 pf_default_rule.nr = -1; 338 339 /* initialize default timeouts */ 340 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 341 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 342 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 343 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 344 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 345 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 346 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 347 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 348 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 349 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 350 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 351 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 352 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 353 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 354 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 355 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 356 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 357 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 358 359 callout_init(&pf_expire_to, NET_CALLOUT_MPSAFE); 360 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz, 361 pf_purge_timeout, &pf_expire_to); 362 363 pf_normalize_init(); 364 bzero(&pf_status, sizeof(pf_status)); 365 pf_pfil_hooked = 0; 366 367 /* XXX do our best to avoid a conflict */ 368 pf_status.hostid = arc4random(); 369 370 return (error); 371} 372#else /* !__FreeBSD__ */ 373void 374pfattach(int num) 375{ 376 u_int32_t *timeout = pf_default_rule.timeout; 377 378 pool_init(&pf_rule_pl, sizeof(struct 
pf_rule), 0, 0, 0, "pfrulepl", 379 &pool_allocator_nointr); 380 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0, 381 "pfsrctrpl", NULL); 382 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", 383 NULL); 384 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl", 385 &pool_allocator_nointr); 386 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0, 387 "pfpooladdrpl", &pool_allocator_nointr); 388 pfr_initialize(); 389 pfi_initialize(); 390 pf_osfp_initialize(); 391 392 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 393 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 394 395 RB_INIT(&tree_src_tracking); 396 RB_INIT(&pf_anchors); 397 pf_init_ruleset(&pf_main_ruleset); 398 TAILQ_INIT(&pf_altqs[0]); 399 TAILQ_INIT(&pf_altqs[1]); 400 TAILQ_INIT(&pf_pabuf); 401 pf_altqs_active = &pf_altqs[0]; 402 pf_altqs_inactive = &pf_altqs[1]; 403 TAILQ_INIT(&state_updates); 404 405 /* default rule should never be garbage collected */ 406 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 407 pf_default_rule.action = PF_PASS; 408 pf_default_rule.nr = -1; 409 410 /* initialize default timeouts */ 411 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 412 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 413 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 414 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 415 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 416 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 417 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 418 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 419 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 420 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 421 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 422 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 423 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 424 timeout[PFTM_OTHER_MULTIPLE] = 
PFTM_OTHER_MULTIPLE_VAL; 425 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 426 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 427 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 428 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 429 430 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to); 431 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz); 432 433 pf_normalize_init(); 434 bzero(&pf_status, sizeof(pf_status)); 435 pf_status.debug = PF_DEBUG_URGENT; 436 437 /* XXX do our best to avoid a conflict */ 438 pf_status.hostid = arc4random(); 439} 440 441int 442pfopen(struct cdev *dev, int flags, int fmt, struct proc *p) 443{ 444 if (minor(dev) >= 1) 445 return (ENXIO); 446 return (0); 447} 448 449int 450pfclose(struct cdev *dev, int flags, int fmt, struct proc *p) 451{ 452 if (minor(dev) >= 1) 453 return (ENXIO); 454 return (0); 455} 456#endif /* __FreeBSD__ */ 457 458struct pf_pool * 459pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, 460 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 461 u_int8_t check_ticket) 462{ 463 struct pf_ruleset *ruleset; 464 struct pf_rule *rule; 465 int rs_num; 466 467 ruleset = pf_find_ruleset(anchor); 468 if (ruleset == NULL) 469 return (NULL); 470 rs_num = pf_get_ruleset_number(rule_action); 471 if (rs_num >= PF_RULESET_MAX) 472 return (NULL); 473 if (active) { 474 if (check_ticket && ticket != 475 ruleset->rules[rs_num].active.ticket) 476 return (NULL); 477 if (r_last) 478 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 479 pf_rulequeue); 480 else 481 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 482 } else { 483 if (check_ticket && ticket != 484 ruleset->rules[rs_num].inactive.ticket) 485 return (NULL); 486 if (r_last) 487 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 488 pf_rulequeue); 489 else 490 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 491 } 492 if (!r_last) { 493 while ((rule != NULL) && (rule->nr != rule_number)) 494 rule = TAILQ_NEXT(rule, entries); 495 } 496 if (rule == 
NULL) 497 return (NULL); 498 499 return (&rule->rpool); 500} 501 502int 503pf_get_ruleset_number(u_int8_t action) 504{ 505 switch (action) { 506 case PF_SCRUB: 507 case PF_NOSCRUB: 508 return (PF_RULESET_SCRUB); 509 break; 510 case PF_PASS: 511 case PF_DROP: 512 return (PF_RULESET_FILTER); 513 break; 514 case PF_NAT: 515 case PF_NONAT: 516 return (PF_RULESET_NAT); 517 break; 518 case PF_BINAT: 519 case PF_NOBINAT: 520 return (PF_RULESET_BINAT); 521 break; 522 case PF_RDR: 523 case PF_NORDR: 524 return (PF_RULESET_RDR); 525 break; 526 default: 527 return (PF_RULESET_MAX); 528 break; 529 } 530} 531 532void 533pf_init_ruleset(struct pf_ruleset *ruleset) 534{ 535 int i; 536 537 memset(ruleset, 0, sizeof(struct pf_ruleset)); 538 for (i = 0; i < PF_RULESET_MAX; i++) { 539 TAILQ_INIT(&ruleset->rules[i].queues[0]); 540 TAILQ_INIT(&ruleset->rules[i].queues[1]); 541 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0]; 542 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1]; 543 } 544} 545 546struct pf_anchor * 547pf_find_anchor(const char *path) 548{ 549 static struct pf_anchor key; 550 551 memset(&key, 0, sizeof(key)); 552 strlcpy(key.path, path, sizeof(key.path)); 553 return (RB_FIND(pf_anchor_global, &pf_anchors, &key)); 554} 555 556struct pf_ruleset * 557pf_find_ruleset(const char *path) 558{ 559 struct pf_anchor *anchor; 560 561 while (*path == '/') 562 path++; 563 if (!*path) 564 return (&pf_main_ruleset); 565 anchor = pf_find_anchor(path); 566 if (anchor == NULL) 567 return (NULL); 568 else 569 return (&anchor->ruleset); 570} 571 572struct pf_ruleset * 573pf_find_or_create_ruleset(const char *path) 574{ 575 static char p[MAXPATHLEN]; 576 char *q = NULL, *r; /* make the compiler happy */ 577 struct pf_ruleset *ruleset; 578 struct pf_anchor *anchor = NULL, *dup, *parent = NULL; 579 580 while (*path == '/') 581 path++; 582 ruleset = pf_find_ruleset(path); 583 if (ruleset != NULL) 584 return (ruleset); 585 strlcpy(p, path, sizeof(p)); 586#ifdef 
__FreeBSD__ 587 while (parent == NULL && (q = rindex(p, '/')) != NULL) { 588#else 589 while (parent == NULL && (q = strrchr(p, '/')) != NULL) { 590#endif 591 *q = 0; 592 if ((ruleset = pf_find_ruleset(p)) != NULL) { 593 parent = ruleset->anchor; 594 break; 595 } 596 } 597 if (q == NULL) 598 q = p; 599 else 600 q++; 601 strlcpy(p, path, sizeof(p)); 602 if (!*q) 603 return (NULL); 604#ifdef __FreeBSD__ 605 while ((r = index(q, '/')) != NULL || *q) { 606#else 607 while ((r = strchr(q, '/')) != NULL || *q) { 608#endif 609 if (r != NULL) 610 *r = 0; 611 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE || 612 (parent != NULL && strlen(parent->path) >= 613 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)) 614 return (NULL); 615 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP, 616 M_NOWAIT); 617 if (anchor == NULL) 618 return (NULL); 619 memset(anchor, 0, sizeof(*anchor)); 620 RB_INIT(&anchor->children); 621 strlcpy(anchor->name, q, sizeof(anchor->name)); 622 if (parent != NULL) { 623 strlcpy(anchor->path, parent->path, 624 sizeof(anchor->path)); 625 strlcat(anchor->path, "/", sizeof(anchor->path)); 626 } 627 strlcat(anchor->path, anchor->name, sizeof(anchor->path)); 628 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) != 629 NULL) { 630 printf("pf_find_or_create_ruleset: RB_INSERT1 " 631 "'%s' '%s' collides with '%s' '%s'\n", 632 anchor->path, anchor->name, dup->path, dup->name); 633 free(anchor, M_TEMP); 634 return (NULL); 635 } 636 if (parent != NULL) { 637 anchor->parent = parent; 638 if ((dup = RB_INSERT(pf_anchor_node, &parent->children, 639 anchor)) != NULL) { 640 printf("pf_find_or_create_ruleset: " 641 "RB_INSERT2 '%s' '%s' collides with " 642 "'%s' '%s'\n", anchor->path, anchor->name, 643 dup->path, dup->name); 644 RB_REMOVE(pf_anchor_global, &pf_anchors, 645 anchor); 646 free(anchor, M_TEMP); 647 return (NULL); 648 } 649 } 650 pf_init_ruleset(&anchor->ruleset); 651 anchor->ruleset.anchor = anchor; 652 parent = anchor; 653 if (r != NULL) 654 q = r + 1; 
655 else 656 *q = 0; 657 } 658 return (&anchor->ruleset); 659} 660 661void 662pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset) 663{ 664 struct pf_anchor *parent; 665 int i; 666 667 while (ruleset != NULL) { 668 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL || 669 !RB_EMPTY(&ruleset->anchor->children) || 670 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 || 671 ruleset->topen) 672 return; 673 for (i = 0; i < PF_RULESET_MAX; ++i) 674 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) || 675 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) || 676 ruleset->rules[i].inactive.open) 677 return; 678 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); 679 if ((parent = ruleset->anchor->parent) != NULL) 680 RB_REMOVE(pf_anchor_node, &parent->children, 681 ruleset->anchor); 682 free(ruleset->anchor, M_TEMP); 683 if (parent == NULL) 684 return; 685 ruleset = &parent->ruleset; 686 } 687} 688 689int 690pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s, 691 const char *name) 692{ 693 static char *p, path[MAXPATHLEN]; 694 struct pf_ruleset *ruleset; 695 696 r->anchor = NULL; 697 r->anchor_relative = 0; 698 r->anchor_wildcard = 0; 699 if (!name[0]) 700 return (0); 701 if (name[0] == '/') 702 strlcpy(path, name + 1, sizeof(path)); 703 else { 704 /* relative path */ 705 r->anchor_relative = 1; 706 if (s->anchor == NULL || !s->anchor->path[0]) 707 path[0] = 0; 708 else 709 strlcpy(path, s->anchor->path, sizeof(path)); 710 while (name[0] == '.' && name[1] == '.' && name[2] == '/') { 711 if (!path[0]) { 712 printf("pf_anchor_setup: .. 
beyond root\n"); 713 return (1); 714 } 715#ifdef __FreeBSD__ 716 if ((p = rindex(path, '/')) != NULL) 717#else 718 if ((p = strrchr(path, '/')) != NULL) 719#endif 720 *p = 0; 721 else 722 path[0] = 0; 723 r->anchor_relative++; 724 name += 3; 725 } 726 if (path[0]) 727 strlcat(path, "/", sizeof(path)); 728 strlcat(path, name, sizeof(path)); 729 } 730#ifdef __FreeBSD__ 731 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) { 732#else 733 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) { 734#endif 735 r->anchor_wildcard = 1; 736 *p = 0; 737 } 738 ruleset = pf_find_or_create_ruleset(path); 739 if (ruleset == NULL || ruleset->anchor == NULL) { 740 printf("pf_anchor_setup: ruleset\n"); 741 return (1); 742 } 743 r->anchor = ruleset->anchor; 744 r->anchor->refcnt++; 745 return (0); 746} 747 748int 749pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r, 750 struct pfioc_rule *pr) 751{ 752 pr->anchor_call[0] = 0; 753 if (r->anchor == NULL) 754 return (0); 755 if (!r->anchor_relative) { 756 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call)); 757 strlcat(pr->anchor_call, r->anchor->path, 758 sizeof(pr->anchor_call)); 759 } else { 760 char a[MAXPATHLEN], b[MAXPATHLEN], *p; 761 int i; 762 763 if (rs->anchor == NULL) 764 a[0] = 0; 765 else 766 strlcpy(a, rs->anchor->path, sizeof(a)); 767 strlcpy(b, r->anchor->path, sizeof(b)); 768 for (i = 1; i < r->anchor_relative; ++i) { 769#ifdef __FreeBSD__ 770 if ((p = rindex(a, '/')) == NULL) 771#else 772 if ((p = strrchr(a, '/')) == NULL) 773#endif 774 p = a; 775 *p = 0; 776 strlcat(pr->anchor_call, "../", 777 sizeof(pr->anchor_call)); 778 } 779 if (strncmp(a, b, strlen(a))) { 780 printf("pf_anchor_copyout: '%s' '%s'\n", a, b); 781 return (1); 782 } 783 if (strlen(b) > strlen(a)) 784 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0), 785 sizeof(pr->anchor_call)); 786 } 787 if (r->anchor_wildcard) 788 strlcat(pr->anchor_call, pr->anchor_call[0] ? 
"/*" : "*", 789 sizeof(pr->anchor_call)); 790 return (0); 791} 792 793void 794pf_anchor_remove(struct pf_rule *r) 795{ 796 if (r->anchor == NULL) 797 return; 798 if (r->anchor->refcnt <= 0) { 799 printf("pf_anchor_remove: broken refcount"); 800 r->anchor = NULL; 801 return; 802 } 803 if (!--r->anchor->refcnt) 804 pf_remove_if_empty_ruleset(&r->anchor->ruleset); 805 r->anchor = NULL; 806} 807 808void 809pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 810{ 811 struct pf_pooladdr *mv_pool_pa; 812 813 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 814 TAILQ_REMOVE(poola, mv_pool_pa, entries); 815 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 816 } 817} 818 819void 820pf_empty_pool(struct pf_palist *poola) 821{ 822 struct pf_pooladdr *empty_pool_pa; 823 824 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 825 pfi_dynaddr_remove(&empty_pool_pa->addr); 826 pf_tbladdr_remove(&empty_pool_pa->addr); 827 pfi_detach_rule(empty_pool_pa->kif); 828 TAILQ_REMOVE(poola, empty_pool_pa, entries); 829 pool_put(&pf_pooladdr_pl, empty_pool_pa); 830 } 831} 832 833void 834pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 835{ 836 if (rulequeue != NULL) { 837 if (rule->states <= 0) { 838 /* 839 * XXX - we need to remove the table *before* detaching 840 * the rule to make sure the table code does not delete 841 * the anchor under our feet. 
842 */ 843 pf_tbladdr_remove(&rule->src.addr); 844 pf_tbladdr_remove(&rule->dst.addr); 845 if (rule->overload_tbl) 846 pfr_detach_table(rule->overload_tbl); 847 } 848 TAILQ_REMOVE(rulequeue, rule, entries); 849 rule->entries.tqe_prev = NULL; 850 rule->nr = -1; 851 } 852 853 if (rule->states > 0 || rule->src_nodes > 0 || 854 rule->entries.tqe_prev != NULL) 855 return; 856 pf_tag_unref(rule->tag); 857 pf_tag_unref(rule->match_tag); 858#ifdef ALTQ 859 if (rule->pqid != rule->qid) 860 pf_qid_unref(rule->pqid); 861 pf_qid_unref(rule->qid); 862#endif 863 pf_rtlabel_remove(&rule->src.addr); 864 pf_rtlabel_remove(&rule->dst.addr); 865 pfi_dynaddr_remove(&rule->src.addr); 866 pfi_dynaddr_remove(&rule->dst.addr); 867 if (rulequeue == NULL) { 868 pf_tbladdr_remove(&rule->src.addr); 869 pf_tbladdr_remove(&rule->dst.addr); 870 if (rule->overload_tbl) 871 pfr_detach_table(rule->overload_tbl); 872 } 873 pfi_detach_rule(rule->kif); 874 pf_anchor_remove(rule); 875 pf_empty_pool(&rule->rpool.list); 876 pool_put(&pf_rule_pl, rule); 877} 878 879static u_int16_t 880tagname2tag(struct pf_tags *head, char *tagname) 881{ 882 struct pf_tagname *tag, *p = NULL; 883 u_int16_t new_tagid = 1; 884 885 TAILQ_FOREACH(tag, head, entries) 886 if (strcmp(tagname, tag->name) == 0) { 887 tag->ref++; 888 return (tag->tag); 889 } 890 891 /* 892 * to avoid fragmentation, we do a linear search from the beginning 893 * and take the first free slot we find. if there is none or the list 894 * is empty, append a new entry at the end. 
895 */ 896 897 /* new entry */ 898 if (!TAILQ_EMPTY(head)) 899 for (p = TAILQ_FIRST(head); p != NULL && 900 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 901 new_tagid = p->tag + 1; 902 903 if (new_tagid > TAGID_MAX) 904 return (0); 905 906 /* allocate and fill new struct pf_tagname */ 907 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname), 908 M_TEMP, M_NOWAIT); 909 if (tag == NULL) 910 return (0); 911 bzero(tag, sizeof(struct pf_tagname)); 912 strlcpy(tag->name, tagname, sizeof(tag->name)); 913 tag->tag = new_tagid; 914 tag->ref++; 915 916 if (p != NULL) /* insert new entry before p */ 917 TAILQ_INSERT_BEFORE(p, tag, entries); 918 else /* either list empty or no free slot in between */ 919 TAILQ_INSERT_TAIL(head, tag, entries); 920 921 return (tag->tag); 922} 923 924static void 925tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 926{ 927 struct pf_tagname *tag; 928 929 TAILQ_FOREACH(tag, head, entries) 930 if (tag->tag == tagid) { 931 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 932 return; 933 } 934} 935 936static void 937tag_unref(struct pf_tags *head, u_int16_t tag) 938{ 939 struct pf_tagname *p, *next; 940 941 if (tag == 0) 942 return; 943 944 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 945 next = TAILQ_NEXT(p, entries); 946 if (tag == p->tag) { 947 if (--p->ref == 0) { 948 TAILQ_REMOVE(head, p, entries); 949 free(p, M_TEMP); 950 } 951 break; 952 } 953 } 954} 955 956u_int16_t 957pf_tagname2tag(char *tagname) 958{ 959 return (tagname2tag(&pf_tags, tagname)); 960} 961 962void 963pf_tag2tagname(u_int16_t tagid, char *p) 964{ 965 return (tag2tagname(&pf_tags, tagid, p)); 966} 967 968void 969pf_tag_ref(u_int16_t tag) 970{ 971 struct pf_tagname *t; 972 973 TAILQ_FOREACH(t, &pf_tags, entries) 974 if (t->tag == tag) 975 break; 976 if (t != NULL) 977 t->ref++; 978} 979 980void 981pf_tag_unref(u_int16_t tag) 982{ 983 return (tag_unref(&pf_tags, tag)); 984} 985 986int 987pf_rtlabel_add(struct pf_addr_wrap *a) 988{ 989#ifdef __FreeBSD__ 990 /* 
XXX_IMPORT: later */ 991 return (0); 992#else 993 if (a->type == PF_ADDR_RTLABEL && 994 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 995 return (-1); 996 return (0); 997#endif 998} 999 1000void 1001pf_rtlabel_remove(struct pf_addr_wrap *a) 1002{ 1003#ifdef __FreeBSD__ 1004 /* XXX_IMPORT: later */ 1005#else 1006 if (a->type == PF_ADDR_RTLABEL) 1007 rtlabel_unref(a->v.rtlabel); 1008#endif 1009} 1010 1011void 1012pf_rtlabel_copyout(struct pf_addr_wrap *a) 1013{ 1014#ifdef __FreeBSD__ 1015 /* XXX_IMPORT: later */ 1016 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) 1017 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname)); 1018#else 1019 const char *name; 1020 1021 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 1022 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL) 1023 strlcpy(a->v.rtlabelname, "?", 1024 sizeof(a->v.rtlabelname)); 1025 else 1026 strlcpy(a->v.rtlabelname, name, 1027 sizeof(a->v.rtlabelname)); 1028 } 1029#endif 1030} 1031 1032#ifdef ALTQ 1033u_int32_t 1034pf_qname2qid(char *qname) 1035{ 1036 return ((u_int32_t)tagname2tag(&pf_qids, qname)); 1037} 1038 1039void 1040pf_qid2qname(u_int32_t qid, char *p) 1041{ 1042 return (tag2tagname(&pf_qids, (u_int16_t)qid, p)); 1043} 1044 1045void 1046pf_qid_unref(u_int32_t qid) 1047{ 1048 return (tag_unref(&pf_qids, (u_int16_t)qid)); 1049} 1050 1051int 1052pf_begin_altq(u_int32_t *ticket) 1053{ 1054 struct pf_altq *altq; 1055 int error = 0; 1056 1057 /* Purge the old altq list */ 1058 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1059 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1060 if (altq->qname[0] == 0) { 1061 /* detach and destroy the discipline */ 1062 error = altq_remove(altq); 1063 } else 1064 pf_qid_unref(altq->qid); 1065 pool_put(&pf_altq_pl, altq); 1066 } 1067 if (error) 1068 return (error); 1069 *ticket = ++ticket_altqs_inactive; 1070 altqs_inactive_open = 1; 1071 return (0); 1072} 1073 1074int 1075pf_rollback_altq(u_int32_t ticket) 1076{ 1077 struct pf_altq *altq; 1078 
int error = 0; 1079 1080 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1081 return (0); 1082 /* Purge the old altq list */ 1083 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1084 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1085 if (altq->qname[0] == 0) { 1086 /* detach and destroy the discipline */ 1087 error = altq_remove(altq); 1088 } else 1089 pf_qid_unref(altq->qid); 1090 pool_put(&pf_altq_pl, altq); 1091 } 1092 altqs_inactive_open = 0; 1093 return (error); 1094} 1095 1096int 1097pf_commit_altq(u_int32_t ticket) 1098{ 1099 struct pf_altqqueue *old_altqs; 1100 struct pf_altq *altq; 1101 int s, err, error = 0; 1102 1103 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1104 return (EBUSY); 1105 1106 /* swap altqs, keep the old. */ 1107 s = splsoftnet(); 1108 old_altqs = pf_altqs_active; 1109 pf_altqs_active = pf_altqs_inactive; 1110 pf_altqs_inactive = old_altqs; 1111 ticket_altqs_active = ticket_altqs_inactive; 1112 1113 /* Attach new disciplines */ 1114 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1115 if (altq->qname[0] == 0) { 1116 /* attach the discipline */ 1117 error = altq_pfattach(altq); 1118 if (error == 0 && pf_altq_running) 1119 error = pf_enable_altq(altq); 1120 if (error != 0) { 1121 splx(s); 1122 return (error); 1123 } 1124 } 1125 } 1126 1127 /* Purge the old altq list */ 1128 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1129 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1130 if (altq->qname[0] == 0) { 1131 /* detach and destroy the discipline */ 1132 if (pf_altq_running) 1133 error = pf_disable_altq(altq); 1134 err = altq_pfdetach(altq); 1135 if (err != 0 && error == 0) 1136 error = err; 1137 err = altq_remove(altq); 1138 if (err != 0 && error == 0) 1139 error = err; 1140 } else 1141 pf_qid_unref(altq->qid); 1142 pool_put(&pf_altq_pl, altq); 1143 } 1144 splx(s); 1145 1146 altqs_inactive_open = 0; 1147 return (error); 1148} 1149 1150int 1151pf_enable_altq(struct pf_altq *altq) 1152{ 1153 
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	/* ifp != NULL is redundant here (checked above) but harmless. */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splimp();
#ifdef __FreeBSD__
		/* tbr_set() may sleep; drop the pf lock across it. */
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

/*
 * Disable queueing on the interface named by altq and clear its
 * token-bucket regulator (rate 0).  If the interface's discipline no
 * longer matches altq's, a newer discipline took over and there is
 * nothing to do.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splimp();
#ifdef __FreeBSD__
		/* tbr_set() may sleep; drop the pf lock across it. */
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

/*
 * Open a ruleset transaction: empty the inactive rule queue of the
 * ruleset identified by anchor (creating the ruleset if needed) and
 * return a fresh inactive ticket through *ticket.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort a ruleset transaction: flush the inactive queue and close the
 * transaction.  An unknown ruleset, closed transaction, or stale
 * ticket is silently ignored (returns 0).
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Commit a ruleset transaction: atomically (under splsoftnet) swap the
 * inactive queue in as the active one, recompute skip steps, then purge
 * the old active queue and possibly garbage-collect an empty ruleset.
 * Returns EBUSY on ticket mismatch or closed transaction.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * pfioctl: ioctl entry point of /dev/pf.  Dispatches all DIOC*
 * commands.  Two up-front permission filters below: a securelevel
 * filter (read-only and dummy-flagged table ops remain allowed at high
 * securelevel) and an FWRITE filter (descriptors opened read-only may
 * only issue get-style commands).  The big switch further down runs
 * under PF_LOCK (FreeBSD) or splsoftnet (OpenBSD).
 */
#ifdef __FreeBSD__
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
#ifndef __FreeBSD__
	int			 s;
#endif
	int			 error = 0;

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 2))
#else
	if (securelevel > 1)
#endif
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCICLRISTATS:
#ifdef __FreeBSD__
		case DIOCGIFSPEED:
#endif
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return
			    (EPERM);
		default:
			return (EPERM);
		}

	/* Read-only descriptors may only issue get-style commands
	 * (or dummy-flagged table ops). */
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
#ifdef __FreeBSD__
		case DIOCGIFSPEED:
#endif
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EACCES);
		default:
			return (EACCES);
		}

#ifdef __FreeBSD__
	PF_LOCK();
#else
	s = splsoftnet();
#endif
	switch (cmd) {

	/* Start packet filtering: hook pfil (FreeBSD), mark running,
	 * and seed the 64-bit state id counter from the clock. */
	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __FreeBSD__
			/* hook_pf() may sleep; drop the pf lock. */
			PF_UNLOCK();
			error = hook_pf();
			PF_LOCK();
			if (error) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil registeration fail\n"));
				break;
			}
#endif
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	/* Stop packet filtering; on FreeBSD re-mark running if the
	 * pfil unhook fails. */
	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
			pf_status.running = 0;
#ifdef __FreeBSD__
			/* dehook_pf() may sleep; drop the pf lock. */
			PF_UNLOCK();
			error = dehook_pf();
			PF_LOCK();
			if (error) {
				pf_status.running = 1;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil unregisteration failed\n"));
			}
#endif
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	/* Append a rule to the inactive queue of the target ruleset;
	 * requires the caller's ruleset and pool-address tickets to
	 * match the current transaction. */
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		/* force NUL termination of the user-supplied anchor */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
#ifdef __FreeBSD__
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
			    ruleset->rules[rs_num].inactive.ticket));
#endif
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
#ifdef __FreeBSD__
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
			    ticket_pabuf));
#endif
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		/* kernel-only pointers must not come from userland */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		/* number the new rule after the current inactive tail */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_attach_rule(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
		}

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		/* adopt the staged pool addresses buffered by DIOCADDADDR */
		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		/* translation and route-to rules need a non-empty pool */
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt >
		    PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		/* any accumulated error above tears the rule down again */
		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets = rule->bytes = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		break;
	}

	/* Return the rule count (nr of the would-be next rule) and the
	 * active ticket for the target ruleset. */
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	/* Copy out the active rule with number pr->nr, resolving
	 * dynamic/table addresses and skip-step rule numbers for
	 * userland consumption. */
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		/* translate skip-step pointers into rule numbers */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;
		break;
	}

	/* Insert, replace, or remove a single rule in the ACTIVE
	 * ruleset (no inactive-list transaction); also hands out the
	 * change ticket via PF_CHANGE_GET_TICKET.  The rule setup
	 * mirrors DIOCADDRULE above. */
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_attach_rule(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
			} else
				newrule->kif = NULL;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !pcr->anchor[0])) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = newrule->packets = 0;
			newrule->bytes = 0;
		}
		pf_empty_pool(&pf_pabuf);

		/* locate the insertion anchor point / victim rule */
		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE)
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
		else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
		}

		/* renumber the whole queue after the modification */
		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	/* Expire all states (optionally only those on one interface) by
	 * forcing their timeout to PFTM_PURGE, then purge immediately. */
	case DIOCCLRSTATES: {
		struct pf_state		*state;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    state->u.s.kif->pfik_name)) {
				state->timeout = PFTM_PURGE;
#if NPFSYNC
				/* don't send out individual delete messages */
				state->sync_flags = PFSTATE_NOSYNC;
#endif
				killed++;
			}
		}
		pf_purge_expired_states();
		pf_status.states = 0;
		/* kill count is returned through the psk_af field */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	/* Expire only the states matching the af/proto/address/port/
	 * interface filter supplied by userland. */
	case DIOCKILLSTATES: {
		struct pf_state		*state;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if ((!psk->psk_af || state->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    state->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &state->lan.addr, state->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &state->ext.addr, state->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    state->lan.port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    state->ext.port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    state->u.s.kif->pfik_name))) {
				state->timeout = PFTM_PURGE;
				killed++;
			}
		}
		pf_purge_expired_states();
		/* kill count is returned through the psk_af field */
		psk->psk_af = killed;
		break;
	}

	/* Insert a state supplied by userland (e.g. pfsync import);
	 * kernel-only pointer fields are reset before insertion. */
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*state;
		struct pfi_kif		*kif;

		if (ps->state.timeout >= PFTM_MAX &&
		    ps->state.timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		state = pool_get(&pf_state_pl, PR_NOWAIT);
		if (state == NULL) {
			error = ENOMEM;
			break;
		}
		kif = pfi_lookup_create(ps->state.u.ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, state);
			error = ENOENT;
			break;
		}
		bcopy(&ps->state, state, sizeof(struct pf_state));
		bzero(&state->u, sizeof(state->u));
		state->rule.ptr = &pf_default_rule;
		state->nat_rule.ptr = NULL;
		state->anchor.ptr = NULL;
		state->rt_kif = NULL;
		state->creation = time_second;
		state->pfsync_time = 0;
		state->packets[0] = state->packets[1] = 0;
		state->bytes[0] = state->bytes[1] = 0;

		if (pf_insert_state(kif, state)) {
			/* drop the kif if we were its only user */
			pfi_maybe_destroy(kif);
			pool_put(&pf_state_pl, state);
			error = ENOMEM;
		}
		break;
	}

	/* Copy out the ps->nr'th state (by id-tree order), translating
	 * rule pointers to rule numbers and expiry to seconds-from-now. */
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*state;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (state == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(state, &ps->state, sizeof(struct pf_state));
		ps->state.rule.nr = state->rule.ptr->nr;
		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
		    -1 : state->nat_rule.ptr->nr;
		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
		    -1 : state->anchor.ptr->nr;
		ps->state.expire = pf_state_expires(state);
		if (ps->state.expire > time_second)
			ps->state.expire -= time_second;
		else
			ps->state.expire = 0;
		break;
	}

	/* Bulk state export.  With ps_len == 0 only the required buffer
	 * size is reported; otherwise states are copied out until the
	 * user buffer is full. */
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pf_state		*p, pstore;
		struct pfi_kif		*kif;
		u_int32_t		 nr = 0;
		int			 space = ps->ps_len;

		if (space == 0) {
			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
				nr += kif->pfik_states;
			ps->ps_len = sizeof(struct pf_state) * nr;
			break;
		}

		p = ps->ps_states;
		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
			RB_FOREACH(state, pf_state_tree_ext_gwy,
			    &kif->pfik_ext_gwy) {
				int	secs = time_second;

				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				/* stage into pstore so kernel pointers can
				 * be rewritten before the copyout */
				bcopy(state, &pstore, sizeof(pstore));
				strlcpy(pstore.u.ifname, kif->pfik_name,
				    sizeof(pstore.u.ifname));
				pstore.rule.nr = state->rule.ptr->nr;
				pstore.nat_rule.nr = (state->nat_rule.ptr ==
				    NULL) ? -1 : state->nat_rule.ptr->nr;
				pstore.anchor.nr = (state->anchor.ptr ==
				    NULL) ? -1 : state->anchor.ptr->nr;
				pstore.creation = secs - pstore.creation;
				pstore.expire = pf_state_expires(state);
				if (pstore.expire > secs)
					pstore.expire -= secs;
				else
					pstore.expire = 0;
#ifdef __FreeBSD__
				PF_COPYOUT(&pstore, p, sizeof(*p), error);
#else
				error = copyout(&pstore, p, sizeof(*p));
#endif
				if (error)
					goto fail;
				p++;
				nr++;
			}
		ps->ps_len = sizeof(struct pf_state) * nr;
		break;
	}

	/* Copy out the global status counters. */
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_fill_oldstatus(s);
		break;
	}

	/* Set (or with an empty name, clear) the status interface. */
	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	/* Zero the status counters (and the status interface's istats). */
	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname, NULL,
			    PFI_FLAG_INSTANCE);
		break;
	}

	/* NAT lookup: given a connection's src/dst, find the matching
	 * state and report the translated addresses/ports. */
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state		*state;
		struct pf_state		 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    !pnl->dport || !pnl->sport)
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				key.ext.port = pnl->dport;
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				key.gwy.port = pnl->sport;
				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				key.lan.port = pnl->dport;
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				key.ext.port = pnl->sport;
				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
					    state->af);
					pnl->rsport = state->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
					    state->af);
					pnl->rdport = state->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
		}
		break;
	}

	/* Set one default-rule timeout; old value is returned in place. */
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		pt->seconds = old;
		break;
	}

	/* Read one default-rule timeout. */
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	/* Read one memory-pool hard limit. */
	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case
	/* Set one memory-pool hard limit; old value is returned in place. */
	    DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __FreeBSD__
		uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
#else
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#endif
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	/* Set the global debug level. */
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	/* Zero the per-rule counters of the main filter ruleset. */
	case DIOCCLRRULECTRS: {
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
			rule->evaluations = rule->packets =
			    rule->bytes = 0;
		break;
	}

#ifdef __FreeBSD__
	/* Report an interface's baudrate (FreeBSD-only helper). */
	case DIOCGIFSPEED: {
		struct pf_ifspeed	*psp = (struct pf_ifspeed *)addr;
		struct pf_ifspeed	 ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] != 0) {
			/* Can we completely trust user-land? */
			/* name is copied to a kernel buffer, NUL-capped
			 * by strlcpy, before the ifunit() lookup */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ifp = ifunit(ps.ifname);
			if (ifp != NULL)
				psp->baudrate = ifp->if_baudrate;
			else
				error = EINVAL;
		} else
			error = EINVAL;
		break;
	}
#endif /* __FreeBSD__ */

#ifdef ALTQ
	/* Enable every per-interface discipline on the active list and
	 * mark ALTQ running. */
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	/* Disable every per-interface discipline on the active list and
	 * mark ALTQ stopped. */
	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	/* Append an altq entry to the inactive list (current ticket
	 * required); queue entries inherit the discipline pointer from
	 * their interface's entry. */
	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

#ifdef __FreeBSD__
		/* altq_add() may sleep; drop the pf lock across it. */
		PF_UNLOCK();
#endif
		error = altq_add(altq);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	/* Report the number of active altq entries and the active ticket. */
	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	/* Copy out the pa->nr'th active altq entry. */
	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	/* Fetch scheduler statistics for the pq->nr'th active altq entry. */
	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;
		int			 nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
#ifdef __FreeBSD__
		/* altq_getqstats() copies out; drop the pf lock. */
		PF_UNLOCK();
#endif
		error = altq_getqstats(altq, pq->buf, &nbytes);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	/* Open a pool-address transaction: empty the staging buffer and
	 * hand out a new pool ticket. */
	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	/* Stage one pool address into pf_pabuf for a later rule add. */
	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_attach_rule(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_detach_rule(pa->kif);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	/* Report the address count of the selected rule's pool. */
	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	/* Copy out the pp->nr'th address of the selected rule's pool. */
	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		u_int32_t		 nr = 0;

		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	/* Insert/replace/remove one address in an active rule's pool
	 * (continues beyond this view). */
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
} 2539 if (pca->action != PF_CHANGE_REMOVE) { 2540 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2541 if (newpa == NULL) { 2542 error = ENOMEM; 2543 break; 2544 } 2545 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2546#ifndef INET 2547 if (pca->af == AF_INET) { 2548 pool_put(&pf_pooladdr_pl, newpa); 2549 error = EAFNOSUPPORT; 2550 break; 2551 } 2552#endif /* INET */ 2553#ifndef INET6 2554 if (pca->af == AF_INET6) { 2555 pool_put(&pf_pooladdr_pl, newpa); 2556 error = EAFNOSUPPORT; 2557 break; 2558 } 2559#endif /* INET6 */ 2560 if (newpa->ifname[0]) { 2561 newpa->kif = pfi_attach_rule(newpa->ifname); 2562 if (newpa->kif == NULL) { 2563 pool_put(&pf_pooladdr_pl, newpa); 2564 error = EINVAL; 2565 break; 2566 } 2567 } else 2568 newpa->kif = NULL; 2569 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2570 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2571 pfi_dynaddr_remove(&newpa->addr); 2572 pfi_detach_rule(newpa->kif); 2573 pool_put(&pf_pooladdr_pl, newpa); 2574 error = EINVAL; 2575 break; 2576 } 2577 } 2578 2579 if (pca->action == PF_CHANGE_ADD_HEAD) 2580 oldpa = TAILQ_FIRST(&pool->list); 2581 else if (pca->action == PF_CHANGE_ADD_TAIL) 2582 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2583 else { 2584 int i = 0; 2585 2586 oldpa = TAILQ_FIRST(&pool->list); 2587 while ((oldpa != NULL) && (i < pca->nr)) { 2588 oldpa = TAILQ_NEXT(oldpa, entries); 2589 i++; 2590 } 2591 if (oldpa == NULL) { 2592 error = EINVAL; 2593 break; 2594 } 2595 } 2596 2597 if (pca->action == PF_CHANGE_REMOVE) { 2598 TAILQ_REMOVE(&pool->list, oldpa, entries); 2599 pfi_dynaddr_remove(&oldpa->addr); 2600 pf_tbladdr_remove(&oldpa->addr); 2601 pfi_detach_rule(oldpa->kif); 2602 pool_put(&pf_pooladdr_pl, oldpa); 2603 } else { 2604 if (oldpa == NULL) 2605 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2606 else if (pca->action == PF_CHANGE_ADD_HEAD || 2607 pca->action == PF_CHANGE_ADD_BEFORE) 2608 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2609 else 2610 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2611 
newpa, entries); 2612 } 2613 2614 pool->cur = TAILQ_FIRST(&pool->list); 2615 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2616 pca->af); 2617 break; 2618 } 2619 2620 case DIOCGETRULESETS: { 2621 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2622 struct pf_ruleset *ruleset; 2623 struct pf_anchor *anchor; 2624 2625 pr->path[sizeof(pr->path) - 1] = 0; 2626 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2627 error = EINVAL; 2628 break; 2629 } 2630 pr->nr = 0; 2631 if (ruleset->anchor == NULL) { 2632 /* XXX kludge for pf_main_ruleset */ 2633 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2634 if (anchor->parent == NULL) 2635 pr->nr++; 2636 } else { 2637 RB_FOREACH(anchor, pf_anchor_node, 2638 &ruleset->anchor->children) 2639 pr->nr++; 2640 } 2641 break; 2642 } 2643 2644 case DIOCGETRULESET: { 2645 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2646 struct pf_ruleset *ruleset; 2647 struct pf_anchor *anchor; 2648 u_int32_t nr = 0; 2649 2650 pr->path[sizeof(pr->path) - 1] = 0; 2651 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2652 error = EINVAL; 2653 break; 2654 } 2655 pr->name[0] = 0; 2656 if (ruleset->anchor == NULL) { 2657 /* XXX kludge for pf_main_ruleset */ 2658 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2659 if (anchor->parent == NULL && nr++ == pr->nr) { 2660 strlcpy(pr->name, anchor->name, 2661 sizeof(pr->name)); 2662 break; 2663 } 2664 } else { 2665 RB_FOREACH(anchor, pf_anchor_node, 2666 &ruleset->anchor->children) 2667 if (nr++ == pr->nr) { 2668 strlcpy(pr->name, anchor->name, 2669 sizeof(pr->name)); 2670 break; 2671 } 2672 } 2673 if (!pr->name[0]) 2674 error = EBUSY; 2675 break; 2676 } 2677 2678 case DIOCRCLRTABLES: { 2679 struct pfioc_table *io = (struct pfioc_table *)addr; 2680 2681 if (io->pfrio_esize != 0) { 2682 error = ENODEV; 2683 break; 2684 } 2685 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2686 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2687 break; 2688 } 2689 2690 case DIOCRADDTABLES: { 
2691 struct pfioc_table *io = (struct pfioc_table *)addr; 2692 2693 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2694 error = ENODEV; 2695 break; 2696 } 2697 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2698 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2699 break; 2700 } 2701 2702 case DIOCRDELTABLES: { 2703 struct pfioc_table *io = (struct pfioc_table *)addr; 2704 2705 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2706 error = ENODEV; 2707 break; 2708 } 2709 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2710 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2711 break; 2712 } 2713 2714 case DIOCRGETTABLES: { 2715 struct pfioc_table *io = (struct pfioc_table *)addr; 2716 2717 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2718 error = ENODEV; 2719 break; 2720 } 2721 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2722 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2723 break; 2724 } 2725 2726 case DIOCRGETTSTATS: { 2727 struct pfioc_table *io = (struct pfioc_table *)addr; 2728 2729 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2730 error = ENODEV; 2731 break; 2732 } 2733 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2734 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2735 break; 2736 } 2737 2738 case DIOCRCLRTSTATS: { 2739 struct pfioc_table *io = (struct pfioc_table *)addr; 2740 2741 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2742 error = ENODEV; 2743 break; 2744 } 2745 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2746 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2747 break; 2748 } 2749 2750 case DIOCRSETTFLAGS: { 2751 struct pfioc_table *io = (struct pfioc_table *)addr; 2752 2753 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2754 error = ENODEV; 2755 break; 2756 } 2757 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2758 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2759 &io->pfrio_ndel, io->pfrio_flags | 
PFR_FLAG_USERIOCTL); 2760 break; 2761 } 2762 2763 case DIOCRCLRADDRS: { 2764 struct pfioc_table *io = (struct pfioc_table *)addr; 2765 2766 if (io->pfrio_esize != 0) { 2767 error = ENODEV; 2768 break; 2769 } 2770 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2771 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2772 break; 2773 } 2774 2775 case DIOCRADDADDRS: { 2776 struct pfioc_table *io = (struct pfioc_table *)addr; 2777 2778 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2779 error = ENODEV; 2780 break; 2781 } 2782 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2783 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2784 PFR_FLAG_USERIOCTL); 2785 break; 2786 } 2787 2788 case DIOCRDELADDRS: { 2789 struct pfioc_table *io = (struct pfioc_table *)addr; 2790 2791 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2792 error = ENODEV; 2793 break; 2794 } 2795 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2796 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2797 PFR_FLAG_USERIOCTL); 2798 break; 2799 } 2800 2801 case DIOCRSETADDRS: { 2802 struct pfioc_table *io = (struct pfioc_table *)addr; 2803 2804 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2805 error = ENODEV; 2806 break; 2807 } 2808 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2809 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2810 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2811 PFR_FLAG_USERIOCTL); 2812 break; 2813 } 2814 2815 case DIOCRGETADDRS: { 2816 struct pfioc_table *io = (struct pfioc_table *)addr; 2817 2818 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2819 error = ENODEV; 2820 break; 2821 } 2822 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2823 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2824 break; 2825 } 2826 2827 case DIOCRGETASTATS: { 2828 struct pfioc_table *io = (struct pfioc_table *)addr; 2829 2830 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2831 error = ENODEV; 2832 break; 2833 } 2834 error = 
pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2835 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2836 break; 2837 } 2838 2839 case DIOCRCLRASTATS: { 2840 struct pfioc_table *io = (struct pfioc_table *)addr; 2841 2842 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2843 error = ENODEV; 2844 break; 2845 } 2846 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2847 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2848 PFR_FLAG_USERIOCTL); 2849 break; 2850 } 2851 2852 case DIOCRTSTADDRS: { 2853 struct pfioc_table *io = (struct pfioc_table *)addr; 2854 2855 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2856 error = ENODEV; 2857 break; 2858 } 2859 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2860 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2861 PFR_FLAG_USERIOCTL); 2862 break; 2863 } 2864 2865 case DIOCRINADEFINE: { 2866 struct pfioc_table *io = (struct pfioc_table *)addr; 2867 2868 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2869 error = ENODEV; 2870 break; 2871 } 2872 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2873 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2874 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2875 break; 2876 } 2877 2878 case DIOCOSFPADD: { 2879 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2880 error = pf_osfp_add(io); 2881 break; 2882 } 2883 2884 case DIOCOSFPGET: { 2885 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2886 error = pf_osfp_get(io); 2887 break; 2888 } 2889 2890 case DIOCXBEGIN: { 2891 struct pfioc_trans *io = (struct pfioc_trans *) 2892 addr; 2893 static struct pfioc_trans_e ioe; 2894 static struct pfr_table table; 2895 int i; 2896 2897 if (io->esize != sizeof(ioe)) { 2898 error = ENODEV; 2899 goto fail; 2900 } 2901 for (i = 0; i < io->size; i++) { 2902#ifdef __FreeBSD__ 2903 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2904 if (error) { 2905#else 2906 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2907#endif 2908 error 
= EFAULT; 2909 goto fail; 2910 } 2911 switch (ioe.rs_num) { 2912#ifdef ALTQ 2913 case PF_RULESET_ALTQ: 2914 if (ioe.anchor[0]) { 2915 error = EINVAL; 2916 goto fail; 2917 } 2918 if ((error = pf_begin_altq(&ioe.ticket))) 2919 goto fail; 2920 break; 2921#endif /* ALTQ */ 2922 case PF_RULESET_TABLE: 2923 bzero(&table, sizeof(table)); 2924 strlcpy(table.pfrt_anchor, ioe.anchor, 2925 sizeof(table.pfrt_anchor)); 2926 if ((error = pfr_ina_begin(&table, 2927 &ioe.ticket, NULL, 0))) 2928 goto fail; 2929 break; 2930 default: 2931 if ((error = pf_begin_rules(&ioe.ticket, 2932 ioe.rs_num, ioe.anchor))) 2933 goto fail; 2934 break; 2935 } 2936#ifdef __FreeBSD__ 2937 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]), 2938 error); 2939 if (error) { 2940#else 2941 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2942#endif 2943 error = EFAULT; 2944 goto fail; 2945 } 2946 } 2947 break; 2948 } 2949 2950 case DIOCXROLLBACK: { 2951 struct pfioc_trans *io = (struct pfioc_trans *) 2952 addr; 2953 static struct pfioc_trans_e ioe; 2954 static struct pfr_table table; 2955 int i; 2956 2957 if (io->esize != sizeof(ioe)) { 2958 error = ENODEV; 2959 goto fail; 2960 } 2961 for (i = 0; i < io->size; i++) { 2962#ifdef __FreeBSD__ 2963 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2964 if (error) { 2965#else 2966 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2967#endif 2968 error = EFAULT; 2969 goto fail; 2970 } 2971 switch (ioe.rs_num) { 2972#ifdef ALTQ 2973 case PF_RULESET_ALTQ: 2974 if (ioe.anchor[0]) { 2975 error = EINVAL; 2976 goto fail; 2977 } 2978 if ((error = pf_rollback_altq(ioe.ticket))) 2979 goto fail; /* really bad */ 2980 break; 2981#endif /* ALTQ */ 2982 case PF_RULESET_TABLE: 2983 bzero(&table, sizeof(table)); 2984 strlcpy(table.pfrt_anchor, ioe.anchor, 2985 sizeof(table.pfrt_anchor)); 2986 if ((error = pfr_ina_rollback(&table, 2987 ioe.ticket, NULL, 0))) 2988 goto fail; /* really bad */ 2989 break; 2990 default: 2991 if ((error = pf_rollback_rules(ioe.ticket, 2992 
ioe.rs_num, ioe.anchor))) 2993 goto fail; /* really bad */ 2994 break; 2995 } 2996 } 2997 break; 2998 } 2999 3000 case DIOCXCOMMIT: { 3001 struct pfioc_trans *io = (struct pfioc_trans *) 3002 addr; 3003 static struct pfioc_trans_e ioe; 3004 static struct pfr_table table; 3005 struct pf_ruleset *rs; 3006 int i; 3007 3008 if (io->esize != sizeof(ioe)) { 3009 error = ENODEV; 3010 goto fail; 3011 } 3012 /* first makes sure everything will succeed */ 3013 for (i = 0; i < io->size; i++) { 3014#ifdef __FreeBSD__ 3015 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3016 if (error) { 3017#else 3018 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3019#endif 3020 error = EFAULT; 3021 goto fail; 3022 } 3023 switch (ioe.rs_num) { 3024#ifdef ALTQ 3025 case PF_RULESET_ALTQ: 3026 if (ioe.anchor[0]) { 3027 error = EINVAL; 3028 goto fail; 3029 } 3030 if (!altqs_inactive_open || ioe.ticket != 3031 ticket_altqs_inactive) { 3032 error = EBUSY; 3033 goto fail; 3034 } 3035 break; 3036#endif /* ALTQ */ 3037 case PF_RULESET_TABLE: 3038 rs = pf_find_ruleset(ioe.anchor); 3039 if (rs == NULL || !rs->topen || ioe.ticket != 3040 rs->tticket) { 3041 error = EBUSY; 3042 goto fail; 3043 } 3044 break; 3045 default: 3046 if (ioe.rs_num < 0 || ioe.rs_num >= 3047 PF_RULESET_MAX) { 3048 error = EINVAL; 3049 goto fail; 3050 } 3051 rs = pf_find_ruleset(ioe.anchor); 3052 if (rs == NULL || 3053 !rs->rules[ioe.rs_num].inactive.open || 3054 rs->rules[ioe.rs_num].inactive.ticket != 3055 ioe.ticket) { 3056 error = EBUSY; 3057 goto fail; 3058 } 3059 break; 3060 } 3061 } 3062 /* now do the commit - no errors should happen here */ 3063 for (i = 0; i < io->size; i++) { 3064#ifdef __FreeBSD__ 3065 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3066 if (error) { 3067#else 3068 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3069#endif 3070 error = EFAULT; 3071 goto fail; 3072 } 3073 switch (ioe.rs_num) { 3074#ifdef ALTQ 3075 case PF_RULESET_ALTQ: 3076 if ((error = pf_commit_altq(ioe.ticket))) 3077 goto fail; /* 
really bad */ 3078 break; 3079#endif /* ALTQ */ 3080 case PF_RULESET_TABLE: 3081 bzero(&table, sizeof(table)); 3082 strlcpy(table.pfrt_anchor, ioe.anchor, 3083 sizeof(table.pfrt_anchor)); 3084 if ((error = pfr_ina_commit(&table, ioe.ticket, 3085 NULL, NULL, 0))) 3086 goto fail; /* really bad */ 3087 break; 3088 default: 3089 if ((error = pf_commit_rules(ioe.ticket, 3090 ioe.rs_num, ioe.anchor))) 3091 goto fail; /* really bad */ 3092 break; 3093 } 3094 } 3095 break; 3096 } 3097 3098 case DIOCGETSRCNODES: { 3099 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3100 struct pf_src_node *n; 3101 struct pf_src_node *p, pstore; 3102 u_int32_t nr = 0; 3103 int space = psn->psn_len; 3104 3105 if (space == 0) { 3106 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3107 nr++; 3108 psn->psn_len = sizeof(struct pf_src_node) * nr; 3109 break; 3110 } 3111 3112 p = psn->psn_src_nodes; 3113 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3114 int secs = time_second, diff; 3115 3116 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3117 break; 3118 3119 bcopy(n, &pstore, sizeof(pstore)); 3120 if (n->rule.ptr != NULL) 3121 pstore.rule.nr = n->rule.ptr->nr; 3122 pstore.creation = secs - pstore.creation; 3123 if (pstore.expire > secs) 3124 pstore.expire -= secs; 3125 else 3126 pstore.expire = 0; 3127 3128 /* adjust the connection rate estimate */ 3129 diff = secs - n->conn_rate.last; 3130 if (diff >= n->conn_rate.seconds) 3131 pstore.conn_rate.count = 0; 3132 else 3133 pstore.conn_rate.count -= 3134 n->conn_rate.count * diff / 3135 n->conn_rate.seconds; 3136 3137#ifdef __FreeBSD__ 3138 PF_COPYOUT(&pstore, p, sizeof(*p), error); 3139#else 3140 error = copyout(&pstore, p, sizeof(*p)); 3141#endif 3142 if (error) 3143 goto fail; 3144 p++; 3145 nr++; 3146 } 3147 psn->psn_len = sizeof(struct pf_src_node) * nr; 3148 break; 3149 } 3150 3151 case DIOCCLRSRCNODES: { 3152 struct pf_src_node *n; 3153 struct pf_state *state; 3154 3155 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 
3156 state->src_node = NULL; 3157 state->nat_src_node = NULL; 3158 } 3159 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3160 n->expire = 1; 3161 n->states = 0; 3162 } 3163 pf_purge_expired_src_nodes(); 3164 pf_status.src_nodes = 0; 3165 break; 3166 } 3167 3168 case DIOCSETHOSTID: { 3169 u_int32_t *hostid = (u_int32_t *)addr; 3170 3171 if (*hostid == 0) 3172 pf_status.hostid = arc4random(); 3173 else 3174 pf_status.hostid = *hostid; 3175 break; 3176 } 3177 3178 case DIOCOSFPFLUSH: 3179 pf_osfp_flush(); 3180 break; 3181 3182 case DIOCIGETIFACES: { 3183 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3184 3185 if (io->pfiio_esize != sizeof(struct pfi_if)) { 3186 error = ENODEV; 3187 break; 3188 } 3189 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3190 &io->pfiio_size, io->pfiio_flags); 3191 break; 3192 } 3193 3194 case DIOCICLRISTATS: { 3195 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3196 3197 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero, 3198 io->pfiio_flags); 3199 break; 3200 } 3201 3202 case DIOCSETIFFLAG: { 3203 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3204 3205 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3206 break; 3207 } 3208 3209 case DIOCCLRIFFLAG: { 3210 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3211 3212 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3213 break; 3214 } 3215 3216 default: 3217 error = ENODEV; 3218 break; 3219 } 3220fail: 3221#ifdef __FreeBSD__ 3222 PF_UNLOCK(); 3223#else 3224 splx(s); 3225#endif 3226 return (error); 3227} 3228 3229#ifdef __FreeBSD__ 3230/* 3231 * XXX - Check for version missmatch!!! 
 */
/*
 * Mark every state for purge on the next expiry pass and flush them.
 * Used on FreeBSD module unload; with pfsync compiled in, individual
 * delete messages are suppressed via PFSTATE_NOSYNC.
 */
static void
pf_clear_states(void)
{
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
	pf_status.states = 0;
#if 0 /* NPFSYNC */
/*
 * XXX This is called on module unload, we do not want to sync that over?
 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Flush all tables: an all-zero pfioc_table request to pfr_clr_tables()
 * clears every table in every anchor.  Returns the pfr error code.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table	 io;
	int			 error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Detach every state from its source-tracking node(s), mark all source
 * nodes for immediate expiry and purge them.
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;
		n->states = 0;
	}
	pf_purge_expired_src_nodes();
	pf_status.src_nodes = 0;
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	callout_stop(&pf_expire_to);

	pf_status.running = 0;
	do {
		/*
		 * Open and immediately commit an empty inactive ruleset
		 * for each ruleset type, atomically replacing the active
		 * rules with nothing.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* same begin/commit-empty trick for the ALTQ ruleset */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for IPv4: run pf_test() on the packet; on a drop
 * verdict the mbuf is freed and *m set to NULL.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD changed its byte ordering convention on ip_len/ip_off
	 * in the network stack.  OpenBSD used to convert ip_len/ip_off
	 * to host byte order first, as FreeBSD does.  This is no longer
	 * true, so convert to network byte order before pf_test() and
	 * restore host order afterwards.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv4: finalize delayed checksums, then run
 * pf_test() with the same byte-order dance as pf_check_in().
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD changed its byte ordering convention on ip_len/ip_off
	 * in the network stack.  OpenBSD used to convert ip_len/ip_off
	 * to host byte order first, as FreeBSD does.  This is no longer
	 * true, so convert to network byte order before pf_test() and
	 * restore host order afterwards.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper CSUM before we start (cf. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for IPv6.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
	 */
	int chk;

	chk = pf_test6(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv6: finalize delayed checksums first.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (cf. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf_check* functions with pfil(9) so pf sees all IPv4
 * (and, with INET6, IPv6) packets.  Idempotent: returns 0 immediately
 * when already hooked.  Must be called without the pf lock held
 * (pfil_add_hook may sleep with PFIL_WAITOK).
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* roll back the IPv4 hooks on failure */
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister the pfil(9) hooks installed by hook_pf().  Idempotent.
 * Must be called without the pf lock held.
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: set up zones and the pf mutex, create /dev/pf and
 * attach pf.  On pfattach() failure everything is torn down again.
 */
static int
pf_load(void)
{
	init_zone_var();
	init_pf_mutex();
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pfattach() < 0) {
		destroy_dev(pf_dev);
		destroy_pf_mutex();
		return (ENOMEM);
	}
	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, unhook from pfil(9), flush all rules,
 * states and tables, and release the device node and mutex.  If the
 * pfil unhook fails the module is left loaded and running state off.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	pf_status.running = 0;
	PF_UNLOCK();
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_dev(pf_dev);
	destroy_pf_mutex();
	return error;
}

/*
 * Kernel module event handler: dispatch MOD_LOAD/MOD_UNLOAD to
 * pf_load()/pf_unload(); reject anything else.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/* module glue: name, event handler, no extra private data */
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */