/* pf_ioctl.c, revision 165719 */
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 165719 2007-01-01 16:51:11Z mlaier $ */ 2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */ 3/* add: $OpenBSD: pf_ioctl.c,v 1.168 2006/07/21 01:21:17 dhartmei Exp $ */ 4 5/* 6 * Copyright (c) 2001 Daniel Hartmeier 7 * Copyright (c) 2002,2003 Henning Brauer 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * - Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * - Redistributions in binary form must reproduce the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer in the documentation and/or other materials provided 19 * with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * Effort sponsored in part by the Defense Advanced Research Projects 35 * Agency (DARPA) and Air Force Research Laboratory, Air Force 36 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
37 * 38 */ 39 40#ifdef __FreeBSD__ 41#include "opt_inet.h" 42#include "opt_inet6.h" 43#endif 44 45#ifdef __FreeBSD__ 46#include "opt_bpf.h" 47#include "opt_pf.h" 48 49#ifdef DEV_BPF 50#define NBPFILTER DEV_BPF 51#else 52#define NBPFILTER 0 53#endif 54 55#ifdef DEV_PFLOG 56#define NPFLOG DEV_PFLOG 57#else 58#define NPFLOG 0 59#endif 60 61#ifdef DEV_PFSYNC 62#define NPFSYNC DEV_PFSYNC 63#else 64#define NPFSYNC 0 65#endif 66 67#else 68#include "bpfilter.h" 69#include "pflog.h" 70#include "pfsync.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/systm.h> 75#include <sys/mbuf.h> 76#include <sys/filio.h> 77#include <sys/fcntl.h> 78#include <sys/socket.h> 79#include <sys/socketvar.h> 80#include <sys/kernel.h> 81#include <sys/time.h> 82#include <sys/malloc.h> 83#ifdef __FreeBSD__ 84#include <sys/module.h> 85#include <sys/conf.h> 86#include <sys/proc.h> 87#else 88#include <sys/timeout.h> 89#include <sys/pool.h> 90#endif 91 92#include <net/if.h> 93#include <net/if_types.h> 94#include <net/route.h> 95 96#include <netinet/in.h> 97#include <netinet/in_var.h> 98#include <netinet/in_systm.h> 99#include <netinet/ip.h> 100#include <netinet/ip_var.h> 101#include <netinet/ip_icmp.h> 102 103#ifndef __FreeBSD__ 104#include <dev/rndvar.h> 105#endif 106#include <net/pfvar.h> 107 108#if NPFSYNC > 0 109#include <net/if_pfsync.h> 110#endif /* NPFSYNC > 0 */ 111 112#ifdef __FreeBSD__ 113#include <net/if_pflog.h> 114#endif 115 116#ifdef INET6 117#include <netinet/ip6.h> 118#include <netinet/in_pcb.h> 119#endif /* INET6 */ 120 121#ifdef ALTQ 122#include <altq/altq.h> 123#endif 124 125#ifdef __FreeBSD__ 126#include <sys/limits.h> 127#include <sys/lock.h> 128#include <sys/mutex.h> 129#include <net/pfil.h> 130#endif /* __FreeBSD__ */ 131 132#ifdef __FreeBSD__ 133void init_zone_var(void); 134void cleanup_pf_zone(void); 135int pfattach(void); 136#else 137void pfattach(int); 138int pfopen(dev_t, int, int, struct proc *); 139int pfclose(dev_t, int, int, struct proc *); 140#endif 141struct 
pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 142 u_int8_t, u_int8_t, u_int8_t); 143int pf_get_ruleset_number(u_int8_t); 144void pf_init_ruleset(struct pf_ruleset *); 145int pf_anchor_setup(struct pf_rule *, 146 const struct pf_ruleset *, const char *); 147int pf_anchor_copyout(const struct pf_ruleset *, 148 const struct pf_rule *, struct pfioc_rule *); 149void pf_anchor_remove(struct pf_rule *); 150 151void pf_mv_pool(struct pf_palist *, struct pf_palist *); 152void pf_empty_pool(struct pf_palist *); 153#ifdef __FreeBSD__ 154int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 155#else 156int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 157#endif 158#ifdef ALTQ 159int pf_begin_altq(u_int32_t *); 160int pf_rollback_altq(u_int32_t); 161int pf_commit_altq(u_int32_t); 162int pf_enable_altq(struct pf_altq *); 163int pf_disable_altq(struct pf_altq *); 164#endif /* ALTQ */ 165int pf_begin_rules(u_int32_t *, int, const char *); 166int pf_rollback_rules(u_int32_t, int, char *); 167int pf_commit_rules(u_int32_t, int, char *); 168 169#ifdef __FreeBSD__ 170extern struct callout pf_expire_to; 171#else 172extern struct timeout pf_expire_to; 173#endif 174 175struct pf_rule pf_default_rule; 176#ifdef ALTQ 177static int pf_altq_running; 178#endif 179 180#define TAGID_MAX 50000 181TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 182 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 183 184#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 185#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 186#endif 187static u_int16_t tagname2tag(struct pf_tags *, char *); 188static void tag2tagname(struct pf_tags *, u_int16_t, char *); 189static void tag_unref(struct pf_tags *, u_int16_t); 190int pf_rtlabel_add(struct pf_addr_wrap *); 191void pf_rtlabel_remove(struct pf_addr_wrap *); 192void pf_rtlabel_copyout(struct pf_addr_wrap *); 193 194#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 195 196 197#ifdef __FreeBSD__ 
198static struct cdev *pf_dev; 199 200/* 201 * XXX - These are new and need to be checked when moveing to a new version 202 */ 203static void pf_clear_states(void); 204static int pf_clear_tables(void); 205static void pf_clear_srcnodes(void); 206/* 207 * XXX - These are new and need to be checked when moveing to a new version 208 */ 209 210/* 211 * Wrapper functions for pfil(9) hooks 212 */ 213static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 214 int dir, struct inpcb *inp); 215static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 216 int dir, struct inpcb *inp); 217#ifdef INET6 218static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 219 int dir, struct inpcb *inp); 220static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, 221 int dir, struct inpcb *inp); 222#endif 223 224static int hook_pf(void); 225static int dehook_pf(void); 226static int shutdown_pf(void); 227static int pf_load(void); 228static int pf_unload(void); 229 230static struct cdevsw pf_cdevsw = { 231 .d_ioctl = pfioctl, 232 .d_name = PF_NAME, 233 .d_version = D_VERSION, 234}; 235 236static volatile int pf_pfil_hooked = 0; 237struct mtx pf_task_mtx; 238pflog_packet_t *pflog_packet_ptr = NULL; 239 240void 241init_pf_mutex(void) 242{ 243 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF); 244} 245 246void 247destroy_pf_mutex(void) 248{ 249 mtx_destroy(&pf_task_mtx); 250} 251 252void 253init_zone_var(void) 254{ 255 pf_src_tree_pl = pf_rule_pl = NULL; 256 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL; 257 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL; 258 pf_state_scrub_pl = NULL; 259 pfr_ktable_pl = pfr_kentry_pl = NULL; 260} 261 262void 263cleanup_pf_zone(void) 264{ 265 UMA_DESTROY(pf_src_tree_pl); 266 UMA_DESTROY(pf_rule_pl); 267 UMA_DESTROY(pf_state_pl); 268 UMA_DESTROY(pf_altq_pl); 269 UMA_DESTROY(pf_pooladdr_pl); 270 UMA_DESTROY(pf_frent_pl); 271 UMA_DESTROY(pf_frag_pl); 272 UMA_DESTROY(pf_cache_pl); 273 
UMA_DESTROY(pf_cent_pl); 274 UMA_DESTROY(pfr_ktable_pl); 275 UMA_DESTROY(pfr_kentry_pl2); 276 UMA_DESTROY(pfr_kentry_pl); 277 UMA_DESTROY(pf_state_scrub_pl); 278 UMA_DESTROY(pfi_addr_pl); 279} 280 281int 282pfattach(void) 283{ 284 u_int32_t *my_timeout = pf_default_rule.timeout; 285 int error = 1; 286 287 do { 288 UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl"); 289 UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl"); 290 UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl"); 291 UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl"); 292 UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl"); 293 UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable"); 294 UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); 295 UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2"); 296 UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent"); 297 UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag"); 298 UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache"); 299 UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent"); 300 UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub, 301 "pfstatescrub"); 302 UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); 303 error = 0; 304 } while(0); 305 if (error) { 306 cleanup_pf_zone(); 307 return (error); 308 } 309 pfr_initialize(); 310 pfi_initialize(); 311 if ( (error = pf_osfp_initialize()) ) { 312 cleanup_pf_zone(); 313 pf_osfp_cleanup(); 314 return (error); 315 } 316 317 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl; 318 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 319 pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl; 320 pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 321 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl; 322 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 323 uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp, 324 pf_pool_limits[PF_LIMIT_STATES].limit); 325 326 RB_INIT(&tree_src_tracking); 327 RB_INIT(&pf_anchors); 328 pf_init_ruleset(&pf_main_ruleset); 
329 TAILQ_INIT(&pf_altqs[0]); 330 TAILQ_INIT(&pf_altqs[1]); 331 TAILQ_INIT(&pf_pabuf); 332 pf_altqs_active = &pf_altqs[0]; 333 pf_altqs_inactive = &pf_altqs[1]; 334 TAILQ_INIT(&state_updates); 335 336 /* default rule should never be garbage collected */ 337 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 338 pf_default_rule.action = PF_PASS; 339 pf_default_rule.nr = -1; 340 341 /* initialize default timeouts */ 342 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 343 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 344 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 345 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 346 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 347 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 348 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 349 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 350 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 351 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 352 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 353 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 354 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 355 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 356 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 357 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 358 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 359 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 360 361 callout_init(&pf_expire_to, NET_CALLOUT_MPSAFE); 362 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz, 363 pf_purge_timeout, &pf_expire_to); 364 365 pf_normalize_init(); 366 bzero(&pf_status, sizeof(pf_status)); 367 pf_pfil_hooked = 0; 368 369 /* XXX do our best to avoid a conflict */ 370 pf_status.hostid = arc4random(); 371 372 return (error); 373} 374#else /* !__FreeBSD__ */ 375void 376pfattach(int num) 377{ 378 u_int32_t *timeout = pf_default_rule.timeout; 379 380 
pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl", 381 &pool_allocator_nointr); 382 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0, 383 "pfsrctrpl", NULL); 384 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", 385 NULL); 386 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl", 387 &pool_allocator_nointr); 388 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0, 389 "pfpooladdrpl", &pool_allocator_nointr); 390 pfr_initialize(); 391 pfi_initialize(); 392 pf_osfp_initialize(); 393 394 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 395 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 396 397 RB_INIT(&tree_src_tracking); 398 RB_INIT(&pf_anchors); 399 pf_init_ruleset(&pf_main_ruleset); 400 TAILQ_INIT(&pf_altqs[0]); 401 TAILQ_INIT(&pf_altqs[1]); 402 TAILQ_INIT(&pf_pabuf); 403 pf_altqs_active = &pf_altqs[0]; 404 pf_altqs_inactive = &pf_altqs[1]; 405 TAILQ_INIT(&state_updates); 406 407 /* default rule should never be garbage collected */ 408 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 409 pf_default_rule.action = PF_PASS; 410 pf_default_rule.nr = -1; 411 412 /* initialize default timeouts */ 413 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 414 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 415 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 416 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 417 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 418 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 419 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 420 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 421 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 422 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 423 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 424 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 425 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 426 
timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 427 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 428 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 429 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 430 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 431 432 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to); 433 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz); 434 435 pf_normalize_init(); 436 bzero(&pf_status, sizeof(pf_status)); 437 pf_status.debug = PF_DEBUG_URGENT; 438 439 /* XXX do our best to avoid a conflict */ 440 pf_status.hostid = arc4random(); 441} 442 443int 444pfopen(struct cdev *dev, int flags, int fmt, struct proc *p) 445{ 446 if (minor(dev) >= 1) 447 return (ENXIO); 448 return (0); 449} 450 451int 452pfclose(struct cdev *dev, int flags, int fmt, struct proc *p) 453{ 454 if (minor(dev) >= 1) 455 return (ENXIO); 456 return (0); 457} 458#endif /* __FreeBSD__ */ 459 460struct pf_pool * 461pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, 462 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 463 u_int8_t check_ticket) 464{ 465 struct pf_ruleset *ruleset; 466 struct pf_rule *rule; 467 int rs_num; 468 469 ruleset = pf_find_ruleset(anchor); 470 if (ruleset == NULL) 471 return (NULL); 472 rs_num = pf_get_ruleset_number(rule_action); 473 if (rs_num >= PF_RULESET_MAX) 474 return (NULL); 475 if (active) { 476 if (check_ticket && ticket != 477 ruleset->rules[rs_num].active.ticket) 478 return (NULL); 479 if (r_last) 480 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 481 pf_rulequeue); 482 else 483 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 484 } else { 485 if (check_ticket && ticket != 486 ruleset->rules[rs_num].inactive.ticket) 487 return (NULL); 488 if (r_last) 489 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 490 pf_rulequeue); 491 else 492 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 493 } 494 if (!r_last) { 495 while ((rule != NULL) && (rule->nr != rule_number)) 496 rule = TAILQ_NEXT(rule, 
entries); 497 } 498 if (rule == NULL) 499 return (NULL); 500 501 return (&rule->rpool); 502} 503 504int 505pf_get_ruleset_number(u_int8_t action) 506{ 507 switch (action) { 508 case PF_SCRUB: 509 case PF_NOSCRUB: 510 return (PF_RULESET_SCRUB); 511 break; 512 case PF_PASS: 513 case PF_DROP: 514 return (PF_RULESET_FILTER); 515 break; 516 case PF_NAT: 517 case PF_NONAT: 518 return (PF_RULESET_NAT); 519 break; 520 case PF_BINAT: 521 case PF_NOBINAT: 522 return (PF_RULESET_BINAT); 523 break; 524 case PF_RDR: 525 case PF_NORDR: 526 return (PF_RULESET_RDR); 527 break; 528 default: 529 return (PF_RULESET_MAX); 530 break; 531 } 532} 533 534void 535pf_init_ruleset(struct pf_ruleset *ruleset) 536{ 537 int i; 538 539 memset(ruleset, 0, sizeof(struct pf_ruleset)); 540 for (i = 0; i < PF_RULESET_MAX; i++) { 541 TAILQ_INIT(&ruleset->rules[i].queues[0]); 542 TAILQ_INIT(&ruleset->rules[i].queues[1]); 543 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0]; 544 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1]; 545 } 546} 547 548struct pf_anchor * 549pf_find_anchor(const char *path) 550{ 551 static struct pf_anchor key; 552 553 memset(&key, 0, sizeof(key)); 554 strlcpy(key.path, path, sizeof(key.path)); 555 return (RB_FIND(pf_anchor_global, &pf_anchors, &key)); 556} 557 558struct pf_ruleset * 559pf_find_ruleset(const char *path) 560{ 561 struct pf_anchor *anchor; 562 563 while (*path == '/') 564 path++; 565 if (!*path) 566 return (&pf_main_ruleset); 567 anchor = pf_find_anchor(path); 568 if (anchor == NULL) 569 return (NULL); 570 else 571 return (&anchor->ruleset); 572} 573 574struct pf_ruleset * 575pf_find_or_create_ruleset(const char *path) 576{ 577 static char p[MAXPATHLEN]; 578 char *q = NULL, *r; /* make the compiler happy */ 579 struct pf_ruleset *ruleset; 580 struct pf_anchor *anchor = NULL, *dup, *parent = NULL; 581 582 while (*path == '/') 583 path++; 584 ruleset = pf_find_ruleset(path); 585 if (ruleset != NULL) 586 return (ruleset); 587 strlcpy(p, path, 
sizeof(p)); 588#ifdef __FreeBSD__ 589 while (parent == NULL && (q = rindex(p, '/')) != NULL) { 590#else 591 while (parent == NULL && (q = strrchr(p, '/')) != NULL) { 592#endif 593 *q = 0; 594 if ((ruleset = pf_find_ruleset(p)) != NULL) { 595 parent = ruleset->anchor; 596 break; 597 } 598 } 599 if (q == NULL) 600 q = p; 601 else 602 q++; 603 strlcpy(p, path, sizeof(p)); 604 if (!*q) 605 return (NULL); 606#ifdef __FreeBSD__ 607 while ((r = index(q, '/')) != NULL || *q) { 608#else 609 while ((r = strchr(q, '/')) != NULL || *q) { 610#endif 611 if (r != NULL) 612 *r = 0; 613 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE || 614 (parent != NULL && strlen(parent->path) >= 615 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)) 616 return (NULL); 617 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP, 618 M_NOWAIT); 619 if (anchor == NULL) 620 return (NULL); 621 memset(anchor, 0, sizeof(*anchor)); 622 RB_INIT(&anchor->children); 623 strlcpy(anchor->name, q, sizeof(anchor->name)); 624 if (parent != NULL) { 625 strlcpy(anchor->path, parent->path, 626 sizeof(anchor->path)); 627 strlcat(anchor->path, "/", sizeof(anchor->path)); 628 } 629 strlcat(anchor->path, anchor->name, sizeof(anchor->path)); 630 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) != 631 NULL) { 632 printf("pf_find_or_create_ruleset: RB_INSERT1 " 633 "'%s' '%s' collides with '%s' '%s'\n", 634 anchor->path, anchor->name, dup->path, dup->name); 635 free(anchor, M_TEMP); 636 return (NULL); 637 } 638 if (parent != NULL) { 639 anchor->parent = parent; 640 if ((dup = RB_INSERT(pf_anchor_node, &parent->children, 641 anchor)) != NULL) { 642 printf("pf_find_or_create_ruleset: " 643 "RB_INSERT2 '%s' '%s' collides with " 644 "'%s' '%s'\n", anchor->path, anchor->name, 645 dup->path, dup->name); 646 RB_REMOVE(pf_anchor_global, &pf_anchors, 647 anchor); 648 free(anchor, M_TEMP); 649 return (NULL); 650 } 651 } 652 pf_init_ruleset(&anchor->ruleset); 653 anchor->ruleset.anchor = anchor; 654 parent = anchor; 655 if (r != 
NULL) 656 q = r + 1; 657 else 658 *q = 0; 659 } 660 return (&anchor->ruleset); 661} 662 663void 664pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset) 665{ 666 struct pf_anchor *parent; 667 int i; 668 669 while (ruleset != NULL) { 670 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL || 671 !RB_EMPTY(&ruleset->anchor->children) || 672 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 || 673 ruleset->topen) 674 return; 675 for (i = 0; i < PF_RULESET_MAX; ++i) 676 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) || 677 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) || 678 ruleset->rules[i].inactive.open) 679 return; 680 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); 681 if ((parent = ruleset->anchor->parent) != NULL) 682 RB_REMOVE(pf_anchor_node, &parent->children, 683 ruleset->anchor); 684 free(ruleset->anchor, M_TEMP); 685 if (parent == NULL) 686 return; 687 ruleset = &parent->ruleset; 688 } 689} 690 691int 692pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s, 693 const char *name) 694{ 695 static char *p, path[MAXPATHLEN]; 696 struct pf_ruleset *ruleset; 697 698 r->anchor = NULL; 699 r->anchor_relative = 0; 700 r->anchor_wildcard = 0; 701 if (!name[0]) 702 return (0); 703 if (name[0] == '/') 704 strlcpy(path, name + 1, sizeof(path)); 705 else { 706 /* relative path */ 707 r->anchor_relative = 1; 708 if (s->anchor == NULL || !s->anchor->path[0]) 709 path[0] = 0; 710 else 711 strlcpy(path, s->anchor->path, sizeof(path)); 712 while (name[0] == '.' && name[1] == '.' && name[2] == '/') { 713 if (!path[0]) { 714 printf("pf_anchor_setup: .. 
beyond root\n"); 715 return (1); 716 } 717#ifdef __FreeBSD__ 718 if ((p = rindex(path, '/')) != NULL) 719#else 720 if ((p = strrchr(path, '/')) != NULL) 721#endif 722 *p = 0; 723 else 724 path[0] = 0; 725 r->anchor_relative++; 726 name += 3; 727 } 728 if (path[0]) 729 strlcat(path, "/", sizeof(path)); 730 strlcat(path, name, sizeof(path)); 731 } 732#ifdef __FreeBSD__ 733 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) { 734#else 735 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) { 736#endif 737 r->anchor_wildcard = 1; 738 *p = 0; 739 } 740 ruleset = pf_find_or_create_ruleset(path); 741 if (ruleset == NULL || ruleset->anchor == NULL) { 742 printf("pf_anchor_setup: ruleset\n"); 743 return (1); 744 } 745 r->anchor = ruleset->anchor; 746 r->anchor->refcnt++; 747 return (0); 748} 749 750int 751pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r, 752 struct pfioc_rule *pr) 753{ 754 pr->anchor_call[0] = 0; 755 if (r->anchor == NULL) 756 return (0); 757 if (!r->anchor_relative) { 758 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call)); 759 strlcat(pr->anchor_call, r->anchor->path, 760 sizeof(pr->anchor_call)); 761 } else { 762 char a[MAXPATHLEN], b[MAXPATHLEN], *p; 763 int i; 764 765 if (rs->anchor == NULL) 766 a[0] = 0; 767 else 768 strlcpy(a, rs->anchor->path, sizeof(a)); 769 strlcpy(b, r->anchor->path, sizeof(b)); 770 for (i = 1; i < r->anchor_relative; ++i) { 771#ifdef __FreeBSD__ 772 if ((p = rindex(a, '/')) == NULL) 773#else 774 if ((p = strrchr(a, '/')) == NULL) 775#endif 776 p = a; 777 *p = 0; 778 strlcat(pr->anchor_call, "../", 779 sizeof(pr->anchor_call)); 780 } 781 if (strncmp(a, b, strlen(a))) { 782 printf("pf_anchor_copyout: '%s' '%s'\n", a, b); 783 return (1); 784 } 785 if (strlen(b) > strlen(a)) 786 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0), 787 sizeof(pr->anchor_call)); 788 } 789 if (r->anchor_wildcard) 790 strlcat(pr->anchor_call, pr->anchor_call[0] ? 
"/*" : "*", 791 sizeof(pr->anchor_call)); 792 return (0); 793} 794 795void 796pf_anchor_remove(struct pf_rule *r) 797{ 798 if (r->anchor == NULL) 799 return; 800 if (r->anchor->refcnt <= 0) { 801 printf("pf_anchor_remove: broken refcount"); 802 r->anchor = NULL; 803 return; 804 } 805 if (!--r->anchor->refcnt) 806 pf_remove_if_empty_ruleset(&r->anchor->ruleset); 807 r->anchor = NULL; 808} 809 810void 811pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 812{ 813 struct pf_pooladdr *mv_pool_pa; 814 815 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 816 TAILQ_REMOVE(poola, mv_pool_pa, entries); 817 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 818 } 819} 820 821void 822pf_empty_pool(struct pf_palist *poola) 823{ 824 struct pf_pooladdr *empty_pool_pa; 825 826 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 827 pfi_dynaddr_remove(&empty_pool_pa->addr); 828 pf_tbladdr_remove(&empty_pool_pa->addr); 829 pfi_detach_rule(empty_pool_pa->kif); 830 TAILQ_REMOVE(poola, empty_pool_pa, entries); 831 pool_put(&pf_pooladdr_pl, empty_pool_pa); 832 } 833} 834 835void 836pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 837{ 838 if (rulequeue != NULL) { 839 if (rule->states <= 0) { 840 /* 841 * XXX - we need to remove the table *before* detaching 842 * the rule to make sure the table code does not delete 843 * the anchor under our feet. 
844 */ 845 pf_tbladdr_remove(&rule->src.addr); 846 pf_tbladdr_remove(&rule->dst.addr); 847 if (rule->overload_tbl) 848 pfr_detach_table(rule->overload_tbl); 849 } 850 TAILQ_REMOVE(rulequeue, rule, entries); 851 rule->entries.tqe_prev = NULL; 852 rule->nr = -1; 853 } 854 855 if (rule->states > 0 || rule->src_nodes > 0 || 856 rule->entries.tqe_prev != NULL) 857 return; 858 pf_tag_unref(rule->tag); 859 pf_tag_unref(rule->match_tag); 860#ifdef ALTQ 861 if (rule->pqid != rule->qid) 862 pf_qid_unref(rule->pqid); 863 pf_qid_unref(rule->qid); 864#endif 865 pf_rtlabel_remove(&rule->src.addr); 866 pf_rtlabel_remove(&rule->dst.addr); 867 pfi_dynaddr_remove(&rule->src.addr); 868 pfi_dynaddr_remove(&rule->dst.addr); 869 if (rulequeue == NULL) { 870 pf_tbladdr_remove(&rule->src.addr); 871 pf_tbladdr_remove(&rule->dst.addr); 872 if (rule->overload_tbl) 873 pfr_detach_table(rule->overload_tbl); 874 } 875 pfi_detach_rule(rule->kif); 876 pf_anchor_remove(rule); 877 pf_empty_pool(&rule->rpool.list); 878 pool_put(&pf_rule_pl, rule); 879} 880 881static u_int16_t 882tagname2tag(struct pf_tags *head, char *tagname) 883{ 884 struct pf_tagname *tag, *p = NULL; 885 u_int16_t new_tagid = 1; 886 887 TAILQ_FOREACH(tag, head, entries) 888 if (strcmp(tagname, tag->name) == 0) { 889 tag->ref++; 890 return (tag->tag); 891 } 892 893 /* 894 * to avoid fragmentation, we do a linear search from the beginning 895 * and take the first free slot we find. if there is none or the list 896 * is empty, append a new entry at the end. 
897 */ 898 899 /* new entry */ 900 if (!TAILQ_EMPTY(head)) 901 for (p = TAILQ_FIRST(head); p != NULL && 902 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 903 new_tagid = p->tag + 1; 904 905 if (new_tagid > TAGID_MAX) 906 return (0); 907 908 /* allocate and fill new struct pf_tagname */ 909 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname), 910 M_TEMP, M_NOWAIT); 911 if (tag == NULL) 912 return (0); 913 bzero(tag, sizeof(struct pf_tagname)); 914 strlcpy(tag->name, tagname, sizeof(tag->name)); 915 tag->tag = new_tagid; 916 tag->ref++; 917 918 if (p != NULL) /* insert new entry before p */ 919 TAILQ_INSERT_BEFORE(p, tag, entries); 920 else /* either list empty or no free slot in between */ 921 TAILQ_INSERT_TAIL(head, tag, entries); 922 923 return (tag->tag); 924} 925 926static void 927tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 928{ 929 struct pf_tagname *tag; 930 931 TAILQ_FOREACH(tag, head, entries) 932 if (tag->tag == tagid) { 933 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 934 return; 935 } 936} 937 938static void 939tag_unref(struct pf_tags *head, u_int16_t tag) 940{ 941 struct pf_tagname *p, *next; 942 943 if (tag == 0) 944 return; 945 946 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 947 next = TAILQ_NEXT(p, entries); 948 if (tag == p->tag) { 949 if (--p->ref == 0) { 950 TAILQ_REMOVE(head, p, entries); 951 free(p, M_TEMP); 952 } 953 break; 954 } 955 } 956} 957 958u_int16_t 959pf_tagname2tag(char *tagname) 960{ 961 return (tagname2tag(&pf_tags, tagname)); 962} 963 964void 965pf_tag2tagname(u_int16_t tagid, char *p) 966{ 967 return (tag2tagname(&pf_tags, tagid, p)); 968} 969 970void 971pf_tag_ref(u_int16_t tag) 972{ 973 struct pf_tagname *t; 974 975 TAILQ_FOREACH(t, &pf_tags, entries) 976 if (t->tag == tag) 977 break; 978 if (t != NULL) 979 t->ref++; 980} 981 982void 983pf_tag_unref(u_int16_t tag) 984{ 985 return (tag_unref(&pf_tags, tag)); 986} 987 988int 989pf_rtlabel_add(struct pf_addr_wrap *a) 990{ 991#ifdef __FreeBSD__ 992 /* 
XXX_IMPORT: later */ 993 return (0); 994#else 995 if (a->type == PF_ADDR_RTLABEL && 996 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 997 return (-1); 998 return (0); 999#endif 1000} 1001 1002void 1003pf_rtlabel_remove(struct pf_addr_wrap *a) 1004{ 1005#ifdef __FreeBSD__ 1006 /* XXX_IMPORT: later */ 1007#else 1008 if (a->type == PF_ADDR_RTLABEL) 1009 rtlabel_unref(a->v.rtlabel); 1010#endif 1011} 1012 1013void 1014pf_rtlabel_copyout(struct pf_addr_wrap *a) 1015{ 1016#ifdef __FreeBSD__ 1017 /* XXX_IMPORT: later */ 1018 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) 1019 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname)); 1020#else 1021 const char *name; 1022 1023 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 1024 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL) 1025 strlcpy(a->v.rtlabelname, "?", 1026 sizeof(a->v.rtlabelname)); 1027 else 1028 strlcpy(a->v.rtlabelname, name, 1029 sizeof(a->v.rtlabelname)); 1030 } 1031#endif 1032} 1033 1034#ifdef ALTQ 1035u_int32_t 1036pf_qname2qid(char *qname) 1037{ 1038 return ((u_int32_t)tagname2tag(&pf_qids, qname)); 1039} 1040 1041void 1042pf_qid2qname(u_int32_t qid, char *p) 1043{ 1044 return (tag2tagname(&pf_qids, (u_int16_t)qid, p)); 1045} 1046 1047void 1048pf_qid_unref(u_int32_t qid) 1049{ 1050 return (tag_unref(&pf_qids, (u_int16_t)qid)); 1051} 1052 1053int 1054pf_begin_altq(u_int32_t *ticket) 1055{ 1056 struct pf_altq *altq; 1057 int error = 0; 1058 1059 /* Purge the old altq list */ 1060 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1061 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1062 if (altq->qname[0] == 0) { 1063 /* detach and destroy the discipline */ 1064 error = altq_remove(altq); 1065 } else 1066 pf_qid_unref(altq->qid); 1067 pool_put(&pf_altq_pl, altq); 1068 } 1069 if (error) 1070 return (error); 1071 *ticket = ++ticket_altqs_inactive; 1072 altqs_inactive_open = 1; 1073 return (0); 1074} 1075 1076int 1077pf_rollback_altq(u_int32_t ticket) 1078{ 1079 struct pf_altq *altq; 
1080 int error = 0; 1081 1082 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1083 return (0); 1084 /* Purge the old altq list */ 1085 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1086 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1087 if (altq->qname[0] == 0) { 1088 /* detach and destroy the discipline */ 1089 error = altq_remove(altq); 1090 } else 1091 pf_qid_unref(altq->qid); 1092 pool_put(&pf_altq_pl, altq); 1093 } 1094 altqs_inactive_open = 0; 1095 return (error); 1096} 1097 1098int 1099pf_commit_altq(u_int32_t ticket) 1100{ 1101 struct pf_altqqueue *old_altqs; 1102 struct pf_altq *altq; 1103 int s, err, error = 0; 1104 1105 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1106 return (EBUSY); 1107 1108 /* swap altqs, keep the old. */ 1109 s = splsoftnet(); 1110 old_altqs = pf_altqs_active; 1111 pf_altqs_active = pf_altqs_inactive; 1112 pf_altqs_inactive = old_altqs; 1113 ticket_altqs_active = ticket_altqs_inactive; 1114 1115 /* Attach new disciplines */ 1116 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1117 if (altq->qname[0] == 0) { 1118 /* attach the discipline */ 1119 error = altq_pfattach(altq); 1120 if (error == 0 && pf_altq_running) 1121 error = pf_enable_altq(altq); 1122 if (error != 0) { 1123 splx(s); 1124 return (error); 1125 } 1126 } 1127 } 1128 1129 /* Purge the old altq list */ 1130 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1131 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1132 if (altq->qname[0] == 0) { 1133 /* detach and destroy the discipline */ 1134 if (pf_altq_running) 1135 error = pf_disable_altq(altq); 1136 err = altq_pfdetach(altq); 1137 if (err != 0 && error == 0) 1138 error = err; 1139 err = altq_remove(altq); 1140 if (err != 0 && error == 0) 1141 error = err; 1142 } else 1143 pf_qid_unref(altq->qid); 1144 pool_put(&pf_altq_pl, altq); 1145 } 1146 splx(s); 1147 1148 altqs_inactive_open = 0; 1149 return (error); 1150} 1151 1152int 1153pf_enable_altq(struct pf_altq *altq) 1154{ 
1155 struct ifnet *ifp; 1156 struct tb_profile tb; 1157 int s, error = 0; 1158 1159 if ((ifp = ifunit(altq->ifname)) == NULL) 1160 return (EINVAL); 1161 1162 if (ifp->if_snd.altq_type != ALTQT_NONE) 1163 error = altq_enable(&ifp->if_snd); 1164 1165 /* set tokenbucket regulator */ 1166 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 1167 tb.rate = altq->ifbandwidth; 1168 tb.depth = altq->tbrsize; 1169 s = splimp(); 1170#ifdef __FreeBSD__ 1171 PF_UNLOCK(); 1172#endif 1173 error = tbr_set(&ifp->if_snd, &tb); 1174#ifdef __FreeBSD__ 1175 PF_LOCK(); 1176#endif 1177 splx(s); 1178 } 1179 1180 return (error); 1181} 1182 1183int 1184pf_disable_altq(struct pf_altq *altq) 1185{ 1186 struct ifnet *ifp; 1187 struct tb_profile tb; 1188 int s, error; 1189 1190 if ((ifp = ifunit(altq->ifname)) == NULL) 1191 return (EINVAL); 1192 1193 /* 1194 * when the discipline is no longer referenced, it was overridden 1195 * by a new one. if so, just return. 1196 */ 1197 if (altq->altq_disc != ifp->if_snd.altq_disc) 1198 return (0); 1199 1200 error = altq_disable(&ifp->if_snd); 1201 1202 if (error == 0) { 1203 /* clear tokenbucket regulator */ 1204 tb.rate = 0; 1205 s = splimp(); 1206#ifdef __FreeBSD__ 1207 PF_UNLOCK(); 1208#endif 1209 error = tbr_set(&ifp->if_snd, &tb); 1210#ifdef __FreeBSD__ 1211 PF_LOCK(); 1212#endif 1213 splx(s); 1214 } 1215 1216 return (error); 1217} 1218#endif /* ALTQ */ 1219 1220int 1221pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1222{ 1223 struct pf_ruleset *rs; 1224 struct pf_rule *rule; 1225 1226 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1227 return (EINVAL); 1228 rs = pf_find_or_create_ruleset(anchor); 1229 if (rs == NULL) 1230 return (EINVAL); 1231 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) 1232 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 1233 *ticket = ++rs->rules[rs_num].inactive.ticket; 1234 rs->rules[rs_num].inactive.open = 1; 1235 return (0); 1236} 1237 1238int 
1239pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1240{ 1241 struct pf_ruleset *rs; 1242 struct pf_rule *rule; 1243 1244 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1245 return (EINVAL); 1246 rs = pf_find_ruleset(anchor); 1247 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1248 rs->rules[rs_num].inactive.ticket != ticket) 1249 return (0); 1250 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) 1251 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 1252 rs->rules[rs_num].inactive.open = 0; 1253 return (0); 1254} 1255 1256int 1257pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) 1258{ 1259 struct pf_ruleset *rs; 1260 struct pf_rule *rule; 1261 struct pf_rulequeue *old_rules; 1262 int s; 1263 1264 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1265 return (EINVAL); 1266 rs = pf_find_ruleset(anchor); 1267 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1268 ticket != rs->rules[rs_num].inactive.ticket) 1269 return (EBUSY); 1270 1271 /* Swap rules, keep the old. */ 1272 s = splsoftnet(); 1273 old_rules = rs->rules[rs_num].active.ptr; 1274 rs->rules[rs_num].active.ptr = 1275 rs->rules[rs_num].inactive.ptr; 1276 rs->rules[rs_num].inactive.ptr = old_rules; 1277 rs->rules[rs_num].active.ticket = 1278 rs->rules[rs_num].inactive.ticket; 1279 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); 1280 1281 /* Purge the old rule list. 
*/ 1282 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 1283 pf_rm_rule(old_rules, rule); 1284 rs->rules[rs_num].inactive.open = 0; 1285 pf_remove_if_empty_ruleset(rs); 1286 splx(s); 1287 return (0); 1288} 1289 1290#ifdef __FreeBSD__ 1291int 1292pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1293#else 1294int 1295pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 1296#endif 1297{ 1298 struct pf_pooladdr *pa = NULL; 1299 struct pf_pool *pool = NULL; 1300#ifndef __FreeBSD__ 1301 int s; 1302#endif 1303 int error = 0; 1304 1305 /* XXX keep in sync with switch() below */ 1306#ifdef __FreeBSD__ 1307 if (securelevel_gt(td->td_ucred, 2)) 1308#else 1309 if (securelevel > 1) 1310#endif 1311 switch (cmd) { 1312 case DIOCGETRULES: 1313 case DIOCGETRULE: 1314 case DIOCGETADDRS: 1315 case DIOCGETADDR: 1316 case DIOCGETSTATE: 1317 case DIOCSETSTATUSIF: 1318 case DIOCGETSTATUS: 1319 case DIOCCLRSTATUS: 1320 case DIOCNATLOOK: 1321 case DIOCSETDEBUG: 1322 case DIOCGETSTATES: 1323 case DIOCGETTIMEOUT: 1324 case DIOCCLRRULECTRS: 1325 case DIOCGETLIMIT: 1326 case DIOCGETALTQS: 1327 case DIOCGETALTQ: 1328 case DIOCGETQSTATS: 1329 case DIOCGETRULESETS: 1330 case DIOCGETRULESET: 1331 case DIOCRGETTABLES: 1332 case DIOCRGETTSTATS: 1333 case DIOCRCLRTSTATS: 1334 case DIOCRCLRADDRS: 1335 case DIOCRADDADDRS: 1336 case DIOCRDELADDRS: 1337 case DIOCRSETADDRS: 1338 case DIOCRGETADDRS: 1339 case DIOCRGETASTATS: 1340 case DIOCRCLRASTATS: 1341 case DIOCRTSTADDRS: 1342 case DIOCOSFPGET: 1343 case DIOCGETSRCNODES: 1344 case DIOCCLRSRCNODES: 1345 case DIOCIGETIFACES: 1346 case DIOCICLRISTATS: 1347#ifdef __FreeBSD__ 1348 case DIOCGIFSPEED: 1349#endif 1350 case DIOCSETIFFLAG: 1351 case DIOCCLRIFFLAG: 1352 break; 1353 case DIOCRCLRTABLES: 1354 case DIOCRADDTABLES: 1355 case DIOCRDELTABLES: 1356 case DIOCRSETTFLAGS: 1357 if (((struct pfioc_table *)addr)->pfrio_flags & 1358 PFR_FLAG_DUMMY) 1359 break; /* dummy operation ok */ 1360 return 
(EPERM); 1361 default: 1362 return (EPERM); 1363 } 1364 1365 if (!(flags & FWRITE)) 1366 switch (cmd) { 1367 case DIOCGETRULES: 1368 case DIOCGETRULE: 1369 case DIOCGETADDRS: 1370 case DIOCGETADDR: 1371 case DIOCGETSTATE: 1372 case DIOCGETSTATUS: 1373 case DIOCGETSTATES: 1374 case DIOCGETTIMEOUT: 1375 case DIOCGETLIMIT: 1376 case DIOCGETALTQS: 1377 case DIOCGETALTQ: 1378 case DIOCGETQSTATS: 1379 case DIOCGETRULESETS: 1380 case DIOCGETRULESET: 1381 case DIOCRGETTABLES: 1382 case DIOCRGETTSTATS: 1383 case DIOCRGETADDRS: 1384 case DIOCRGETASTATS: 1385 case DIOCRTSTADDRS: 1386 case DIOCOSFPGET: 1387 case DIOCGETSRCNODES: 1388 case DIOCIGETIFACES: 1389#ifdef __FreeBSD__ 1390 case DIOCGIFSPEED: 1391#endif 1392 break; 1393 case DIOCRCLRTABLES: 1394 case DIOCRADDTABLES: 1395 case DIOCRDELTABLES: 1396 case DIOCRCLRTSTATS: 1397 case DIOCRCLRADDRS: 1398 case DIOCRADDADDRS: 1399 case DIOCRDELADDRS: 1400 case DIOCRSETADDRS: 1401 case DIOCRSETTFLAGS: 1402 if (((struct pfioc_table *)addr)->pfrio_flags & 1403 PFR_FLAG_DUMMY) 1404 break; /* dummy operation ok */ 1405 return (EACCES); 1406 default: 1407 return (EACCES); 1408 } 1409 1410#ifdef __FreeBSD__ 1411 PF_LOCK(); 1412#else 1413 s = splsoftnet(); 1414#endif 1415 switch (cmd) { 1416 1417 case DIOCSTART: 1418 if (pf_status.running) 1419 error = EEXIST; 1420 else { 1421#ifdef __FreeBSD__ 1422 PF_UNLOCK(); 1423 error = hook_pf(); 1424 PF_LOCK(); 1425 if (error) { 1426 DPFPRINTF(PF_DEBUG_MISC, 1427 ("pf: pfil registeration fail\n")); 1428 break; 1429 } 1430#endif 1431 pf_status.running = 1; 1432 pf_status.since = time_second; 1433 if (pf_status.stateid == 0) { 1434 pf_status.stateid = time_second; 1435 pf_status.stateid = pf_status.stateid << 32; 1436 } 1437 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1438 } 1439 break; 1440 1441 case DIOCSTOP: 1442 if (!pf_status.running) 1443 error = ENOENT; 1444 else { 1445 pf_status.running = 0; 1446#ifdef __FreeBSD__ 1447 PF_UNLOCK(); 1448 error = dehook_pf(); 1449 PF_LOCK(); 1450 if (error) 
{ 1451 pf_status.running = 1; 1452 DPFPRINTF(PF_DEBUG_MISC, 1453 ("pf: pfil unregisteration failed\n")); 1454 } 1455#endif 1456 pf_status.since = time_second; 1457 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1458 } 1459 break; 1460 1461 case DIOCADDRULE: { 1462 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1463 struct pf_ruleset *ruleset; 1464 struct pf_rule *rule, *tail; 1465 struct pf_pooladdr *pa; 1466 int rs_num; 1467 1468 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1469 ruleset = pf_find_ruleset(pr->anchor); 1470 if (ruleset == NULL) { 1471 error = EINVAL; 1472 break; 1473 } 1474 rs_num = pf_get_ruleset_number(pr->rule.action); 1475 if (rs_num >= PF_RULESET_MAX) { 1476 error = EINVAL; 1477 break; 1478 } 1479 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1480 error = EINVAL; 1481 break; 1482 } 1483 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1484#ifdef __FreeBSD__ 1485 DPFPRINTF(PF_DEBUG_MISC, 1486 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1487 ruleset->rules[rs_num].inactive.ticket)); 1488#endif 1489 error = EBUSY; 1490 break; 1491 } 1492 if (pr->pool_ticket != ticket_pabuf) { 1493#ifdef __FreeBSD__ 1494 DPFPRINTF(PF_DEBUG_MISC, 1495 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1496 ticket_pabuf)); 1497#endif 1498 error = EBUSY; 1499 break; 1500 } 1501 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1502 if (rule == NULL) { 1503 error = ENOMEM; 1504 break; 1505 } 1506 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1507 rule->anchor = NULL; 1508 rule->kif = NULL; 1509 TAILQ_INIT(&rule->rpool.list); 1510 /* initialize refcounting */ 1511 rule->states = 0; 1512 rule->src_nodes = 0; 1513 rule->entries.tqe_prev = NULL; 1514#ifndef INET 1515 if (rule->af == AF_INET) { 1516 pool_put(&pf_rule_pl, rule); 1517 error = EAFNOSUPPORT; 1518 break; 1519 } 1520#endif /* INET */ 1521#ifndef INET6 1522 if (rule->af == AF_INET6) { 1523 pool_put(&pf_rule_pl, rule); 1524 error = EAFNOSUPPORT; 1525 break; 1526 } 1527#endif /* INET6 */ 1528 tail = 
TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1529 pf_rulequeue); 1530 if (tail) 1531 rule->nr = tail->nr + 1; 1532 else 1533 rule->nr = 0; 1534 if (rule->ifname[0]) { 1535 rule->kif = pfi_attach_rule(rule->ifname); 1536 if (rule->kif == NULL) { 1537 pool_put(&pf_rule_pl, rule); 1538 error = EINVAL; 1539 break; 1540 } 1541 } 1542 1543#ifdef ALTQ 1544 /* set queue IDs */ 1545 if (rule->qname[0] != 0) { 1546 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1547 error = EBUSY; 1548 else if (rule->pqname[0] != 0) { 1549 if ((rule->pqid = 1550 pf_qname2qid(rule->pqname)) == 0) 1551 error = EBUSY; 1552 } else 1553 rule->pqid = rule->qid; 1554 } 1555#endif 1556 if (rule->tagname[0]) 1557 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1558 error = EBUSY; 1559 if (rule->match_tagname[0]) 1560 if ((rule->match_tag = 1561 pf_tagname2tag(rule->match_tagname)) == 0) 1562 error = EBUSY; 1563 if (rule->rt && !rule->direction) 1564 error = EINVAL; 1565 if (pf_rtlabel_add(&rule->src.addr) || 1566 pf_rtlabel_add(&rule->dst.addr)) 1567 error = EBUSY; 1568 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1569 error = EINVAL; 1570 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1571 error = EINVAL; 1572 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1573 error = EINVAL; 1574 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1575 error = EINVAL; 1576 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1577 error = EINVAL; 1578 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1579 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1580 error = EINVAL; 1581 1582 if (rule->overload_tblname[0]) { 1583 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1584 rule->overload_tblname)) == NULL) 1585 error = EINVAL; 1586 else 1587 rule->overload_tbl->pfrkt_flags |= 1588 PFR_TFLAG_ACTIVE; 1589 } 1590 1591 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1592 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1593 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1594 (rule->rt > 
PF_FASTROUTE)) && 1595 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1596 error = EINVAL; 1597 1598 if (error) { 1599 pf_rm_rule(NULL, rule); 1600 break; 1601 } 1602 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1603 rule->evaluations = rule->packets = rule->bytes = 0; 1604 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1605 rule, entries); 1606 break; 1607 } 1608 1609 case DIOCGETRULES: { 1610 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1611 struct pf_ruleset *ruleset; 1612 struct pf_rule *tail; 1613 int rs_num; 1614 1615 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1616 ruleset = pf_find_ruleset(pr->anchor); 1617 if (ruleset == NULL) { 1618 error = EINVAL; 1619 break; 1620 } 1621 rs_num = pf_get_ruleset_number(pr->rule.action); 1622 if (rs_num >= PF_RULESET_MAX) { 1623 error = EINVAL; 1624 break; 1625 } 1626 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1627 pf_rulequeue); 1628 if (tail) 1629 pr->nr = tail->nr + 1; 1630 else 1631 pr->nr = 0; 1632 pr->ticket = ruleset->rules[rs_num].active.ticket; 1633 break; 1634 } 1635 1636 case DIOCGETRULE: { 1637 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1638 struct pf_ruleset *ruleset; 1639 struct pf_rule *rule; 1640 int rs_num, i; 1641 1642 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1643 ruleset = pf_find_ruleset(pr->anchor); 1644 if (ruleset == NULL) { 1645 error = EINVAL; 1646 break; 1647 } 1648 rs_num = pf_get_ruleset_number(pr->rule.action); 1649 if (rs_num >= PF_RULESET_MAX) { 1650 error = EINVAL; 1651 break; 1652 } 1653 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1654 error = EBUSY; 1655 break; 1656 } 1657 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1658 while ((rule != NULL) && (rule->nr != pr->nr)) 1659 rule = TAILQ_NEXT(rule, entries); 1660 if (rule == NULL) { 1661 error = EBUSY; 1662 break; 1663 } 1664 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1665 if (pf_anchor_copyout(ruleset, rule, pr)) { 1666 error = EBUSY; 1667 break; 1668 } 1669 
pfi_dynaddr_copyout(&pr->rule.src.addr); 1670 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1671 pf_tbladdr_copyout(&pr->rule.src.addr); 1672 pf_tbladdr_copyout(&pr->rule.dst.addr); 1673 pf_rtlabel_copyout(&pr->rule.src.addr); 1674 pf_rtlabel_copyout(&pr->rule.dst.addr); 1675 for (i = 0; i < PF_SKIP_COUNT; ++i) 1676 if (rule->skip[i].ptr == NULL) 1677 pr->rule.skip[i].nr = -1; 1678 else 1679 pr->rule.skip[i].nr = 1680 rule->skip[i].ptr->nr; 1681 break; 1682 } 1683 1684 case DIOCCHANGERULE: { 1685 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1686 struct pf_ruleset *ruleset; 1687 struct pf_rule *oldrule = NULL, *newrule = NULL; 1688 u_int32_t nr = 0; 1689 int rs_num; 1690 1691 if (!(pcr->action == PF_CHANGE_REMOVE || 1692 pcr->action == PF_CHANGE_GET_TICKET) && 1693 pcr->pool_ticket != ticket_pabuf) { 1694 error = EBUSY; 1695 break; 1696 } 1697 1698 if (pcr->action < PF_CHANGE_ADD_HEAD || 1699 pcr->action > PF_CHANGE_GET_TICKET) { 1700 error = EINVAL; 1701 break; 1702 } 1703 ruleset = pf_find_ruleset(pcr->anchor); 1704 if (ruleset == NULL) { 1705 error = EINVAL; 1706 break; 1707 } 1708 rs_num = pf_get_ruleset_number(pcr->rule.action); 1709 if (rs_num >= PF_RULESET_MAX) { 1710 error = EINVAL; 1711 break; 1712 } 1713 1714 if (pcr->action == PF_CHANGE_GET_TICKET) { 1715 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1716 break; 1717 } else { 1718 if (pcr->ticket != 1719 ruleset->rules[rs_num].active.ticket) { 1720 error = EINVAL; 1721 break; 1722 } 1723 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1724 error = EINVAL; 1725 break; 1726 } 1727 } 1728 1729 if (pcr->action != PF_CHANGE_REMOVE) { 1730 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1731 if (newrule == NULL) { 1732 error = ENOMEM; 1733 break; 1734 } 1735 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1736 TAILQ_INIT(&newrule->rpool.list); 1737 /* initialize refcounting */ 1738 newrule->states = 0; 1739 newrule->entries.tqe_prev = NULL; 1740#ifndef INET 1741 if (newrule->af == AF_INET) { 1742 
pool_put(&pf_rule_pl, newrule); 1743 error = EAFNOSUPPORT; 1744 break; 1745 } 1746#endif /* INET */ 1747#ifndef INET6 1748 if (newrule->af == AF_INET6) { 1749 pool_put(&pf_rule_pl, newrule); 1750 error = EAFNOSUPPORT; 1751 break; 1752 } 1753#endif /* INET6 */ 1754 if (newrule->ifname[0]) { 1755 newrule->kif = pfi_attach_rule(newrule->ifname); 1756 if (newrule->kif == NULL) { 1757 pool_put(&pf_rule_pl, newrule); 1758 error = EINVAL; 1759 break; 1760 } 1761 } else 1762 newrule->kif = NULL; 1763 1764#ifdef ALTQ 1765 /* set queue IDs */ 1766 if (newrule->qname[0] != 0) { 1767 if ((newrule->qid = 1768 pf_qname2qid(newrule->qname)) == 0) 1769 error = EBUSY; 1770 else if (newrule->pqname[0] != 0) { 1771 if ((newrule->pqid = 1772 pf_qname2qid(newrule->pqname)) == 0) 1773 error = EBUSY; 1774 } else 1775 newrule->pqid = newrule->qid; 1776 } 1777#endif /* ALTQ */ 1778 if (newrule->tagname[0]) 1779 if ((newrule->tag = 1780 pf_tagname2tag(newrule->tagname)) == 0) 1781 error = EBUSY; 1782 if (newrule->match_tagname[0]) 1783 if ((newrule->match_tag = pf_tagname2tag( 1784 newrule->match_tagname)) == 0) 1785 error = EBUSY; 1786 if (newrule->rt && !newrule->direction) 1787 error = EINVAL; 1788 if (pf_rtlabel_add(&newrule->src.addr) || 1789 pf_rtlabel_add(&newrule->dst.addr)) 1790 error = EBUSY; 1791 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1792 error = EINVAL; 1793 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1794 error = EINVAL; 1795 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1796 error = EINVAL; 1797 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1798 error = EINVAL; 1799 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1800 error = EINVAL; 1801 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1802 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1803 error = EINVAL; 1804 1805 if (newrule->overload_tblname[0]) { 1806 if ((newrule->overload_tbl = pfr_attach_table( 1807 ruleset, newrule->overload_tblname)) == 1808 NULL) 1809 error = EINVAL; 1810 else 1811 
newrule->overload_tbl->pfrkt_flags |= 1812 PFR_TFLAG_ACTIVE; 1813 } 1814 1815 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1816 if (((((newrule->action == PF_NAT) || 1817 (newrule->action == PF_RDR) || 1818 (newrule->action == PF_BINAT) || 1819 (newrule->rt > PF_FASTROUTE)) && 1820 !newrule->anchor)) && 1821 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1822 error = EINVAL; 1823 1824 if (error) { 1825 pf_rm_rule(NULL, newrule); 1826 break; 1827 } 1828 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1829 newrule->evaluations = newrule->packets = 0; 1830 newrule->bytes = 0; 1831 } 1832 pf_empty_pool(&pf_pabuf); 1833 1834 if (pcr->action == PF_CHANGE_ADD_HEAD) 1835 oldrule = TAILQ_FIRST( 1836 ruleset->rules[rs_num].active.ptr); 1837 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1838 oldrule = TAILQ_LAST( 1839 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1840 else { 1841 oldrule = TAILQ_FIRST( 1842 ruleset->rules[rs_num].active.ptr); 1843 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1844 oldrule = TAILQ_NEXT(oldrule, entries); 1845 if (oldrule == NULL) { 1846 if (newrule != NULL) 1847 pf_rm_rule(NULL, newrule); 1848 error = EINVAL; 1849 break; 1850 } 1851 } 1852 1853 if (pcr->action == PF_CHANGE_REMOVE) 1854 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1855 else { 1856 if (oldrule == NULL) 1857 TAILQ_INSERT_TAIL( 1858 ruleset->rules[rs_num].active.ptr, 1859 newrule, entries); 1860 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1861 pcr->action == PF_CHANGE_ADD_BEFORE) 1862 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1863 else 1864 TAILQ_INSERT_AFTER( 1865 ruleset->rules[rs_num].active.ptr, 1866 oldrule, newrule, entries); 1867 } 1868 1869 nr = 0; 1870 TAILQ_FOREACH(oldrule, 1871 ruleset->rules[rs_num].active.ptr, entries) 1872 oldrule->nr = nr++; 1873 1874 ruleset->rules[rs_num].active.ticket++; 1875 1876 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1877 pf_remove_if_empty_ruleset(ruleset); 1878 1879 break; 1880 } 1881 1882 
case DIOCCLRSTATES: { 1883 struct pf_state *state; 1884 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1885 int killed = 0; 1886 1887 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1888 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1889 state->u.s.kif->pfik_name)) { 1890 state->timeout = PFTM_PURGE; 1891#if NPFSYNC 1892 /* don't send out individual delete messages */ 1893 state->sync_flags = PFSTATE_NOSYNC; 1894#endif 1895 killed++; 1896 } 1897 } 1898 pf_purge_expired_states(); 1899 pf_status.states = 0; 1900 psk->psk_af = killed; 1901#if NPFSYNC 1902 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1903#endif 1904 break; 1905 } 1906 1907 case DIOCKILLSTATES: { 1908 struct pf_state *state; 1909 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1910 int killed = 0; 1911 1912 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1913 if ((!psk->psk_af || state->af == psk->psk_af) 1914 && (!psk->psk_proto || psk->psk_proto == 1915 state->proto) && 1916 PF_MATCHA(psk->psk_src.neg, 1917 &psk->psk_src.addr.v.a.addr, 1918 &psk->psk_src.addr.v.a.mask, 1919 &state->lan.addr, state->af) && 1920 PF_MATCHA(psk->psk_dst.neg, 1921 &psk->psk_dst.addr.v.a.addr, 1922 &psk->psk_dst.addr.v.a.mask, 1923 &state->ext.addr, state->af) && 1924 (psk->psk_src.port_op == 0 || 1925 pf_match_port(psk->psk_src.port_op, 1926 psk->psk_src.port[0], psk->psk_src.port[1], 1927 state->lan.port)) && 1928 (psk->psk_dst.port_op == 0 || 1929 pf_match_port(psk->psk_dst.port_op, 1930 psk->psk_dst.port[0], psk->psk_dst.port[1], 1931 state->ext.port)) && 1932 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1933 state->u.s.kif->pfik_name))) { 1934 state->timeout = PFTM_PURGE; 1935 killed++; 1936 } 1937 } 1938 pf_purge_expired_states(); 1939 psk->psk_af = killed; 1940 break; 1941 } 1942 1943 case DIOCADDSTATE: { 1944 struct pfioc_state *ps = (struct pfioc_state *)addr; 1945 struct pf_state *state; 1946 struct pfi_kif *kif; 1947 1948 if (ps->state.timeout >= PFTM_MAX && 
1949 ps->state.timeout != PFTM_UNTIL_PACKET) { 1950 error = EINVAL; 1951 break; 1952 } 1953 state = pool_get(&pf_state_pl, PR_NOWAIT); 1954 if (state == NULL) { 1955 error = ENOMEM; 1956 break; 1957 } 1958 kif = pfi_lookup_create(ps->state.u.ifname); 1959 if (kif == NULL) { 1960 pool_put(&pf_state_pl, state); 1961 error = ENOENT; 1962 break; 1963 } 1964 bcopy(&ps->state, state, sizeof(struct pf_state)); 1965 bzero(&state->u, sizeof(state->u)); 1966 state->rule.ptr = &pf_default_rule; 1967 state->nat_rule.ptr = NULL; 1968 state->anchor.ptr = NULL; 1969 state->rt_kif = NULL; 1970 state->creation = time_second; 1971 state->pfsync_time = 0; 1972 state->packets[0] = state->packets[1] = 0; 1973 state->bytes[0] = state->bytes[1] = 0; 1974 1975 if (pf_insert_state(kif, state)) { 1976 pfi_maybe_destroy(kif); 1977 pool_put(&pf_state_pl, state); 1978 error = ENOMEM; 1979 } 1980 break; 1981 } 1982 1983 case DIOCGETSTATE: { 1984 struct pfioc_state *ps = (struct pfioc_state *)addr; 1985 struct pf_state *state; 1986 u_int32_t nr; 1987 1988 nr = 0; 1989 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1990 if (nr >= ps->nr) 1991 break; 1992 nr++; 1993 } 1994 if (state == NULL) { 1995 error = EBUSY; 1996 break; 1997 } 1998 bcopy(state, &ps->state, sizeof(struct pf_state)); 1999 ps->state.rule.nr = state->rule.ptr->nr; 2000 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 2001 -1 : state->nat_rule.ptr->nr; 2002 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
2003 -1 : state->anchor.ptr->nr; 2004 ps->state.expire = pf_state_expires(state); 2005 if (ps->state.expire > time_second) 2006 ps->state.expire -= time_second; 2007 else 2008 ps->state.expire = 0; 2009 break; 2010 } 2011 2012 case DIOCGETSTATES: { 2013 struct pfioc_states *ps = (struct pfioc_states *)addr; 2014 struct pf_state *state; 2015 struct pf_state *p, pstore; 2016 struct pfi_kif *kif; 2017 u_int32_t nr = 0; 2018 int space = ps->ps_len; 2019 2020 if (space == 0) { 2021 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 2022 nr += kif->pfik_states; 2023 ps->ps_len = sizeof(struct pf_state) * nr; 2024 break; 2025 } 2026 2027 p = ps->ps_states; 2028 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 2029 RB_FOREACH(state, pf_state_tree_ext_gwy, 2030 &kif->pfik_ext_gwy) { 2031 int secs = time_second; 2032 2033 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2034 break; 2035 2036 bcopy(state, &pstore, sizeof(pstore)); 2037 strlcpy(pstore.u.ifname, kif->pfik_name, 2038 sizeof(pstore.u.ifname)); 2039 pstore.rule.nr = state->rule.ptr->nr; 2040 pstore.nat_rule.nr = (state->nat_rule.ptr == 2041 NULL) ? -1 : state->nat_rule.ptr->nr; 2042 pstore.anchor.nr = (state->anchor.ptr == 2043 NULL) ? 
-1 : state->anchor.ptr->nr; 2044 pstore.creation = secs - pstore.creation; 2045 pstore.expire = pf_state_expires(state); 2046 if (pstore.expire > secs) 2047 pstore.expire -= secs; 2048 else 2049 pstore.expire = 0; 2050#ifdef __FreeBSD__ 2051 PF_COPYOUT(&pstore, p, sizeof(*p), error); 2052#else 2053 error = copyout(&pstore, p, sizeof(*p)); 2054#endif 2055 if (error) 2056 goto fail; 2057 p++; 2058 nr++; 2059 } 2060 ps->ps_len = sizeof(struct pf_state) * nr; 2061 break; 2062 } 2063 2064 case DIOCGETSTATUS: { 2065 struct pf_status *s = (struct pf_status *)addr; 2066 bcopy(&pf_status, s, sizeof(struct pf_status)); 2067 pfi_fill_oldstatus(s); 2068 break; 2069 } 2070 2071 case DIOCSETSTATUSIF: { 2072 struct pfioc_if *pi = (struct pfioc_if *)addr; 2073 2074 if (pi->ifname[0] == 0) { 2075 bzero(pf_status.ifname, IFNAMSIZ); 2076 break; 2077 } 2078 if (ifunit(pi->ifname) == NULL) { 2079 error = EINVAL; 2080 break; 2081 } 2082 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2083 break; 2084 } 2085 2086 case DIOCCLRSTATUS: { 2087 bzero(pf_status.counters, sizeof(pf_status.counters)); 2088 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2089 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2090 if (*pf_status.ifname) 2091 pfi_clr_istats(pf_status.ifname, NULL, 2092 PFI_FLAG_INSTANCE); 2093 break; 2094 } 2095 2096 case DIOCNATLOOK: { 2097 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2098 struct pf_state *state; 2099 struct pf_state key; 2100 int m = 0, direction = pnl->direction; 2101 2102 key.af = pnl->af; 2103 key.proto = pnl->proto; 2104 2105 if (!pnl->proto || 2106 PF_AZERO(&pnl->saddr, pnl->af) || 2107 PF_AZERO(&pnl->daddr, pnl->af) || 2108 !pnl->dport || !pnl->sport) 2109 error = EINVAL; 2110 else { 2111 /* 2112 * userland gives us source and dest of connection, 2113 * reverse the lookup so we ask for what happens with 2114 * the return traffic, enabling us to find it in the 2115 * state tree. 
2116 */ 2117 if (direction == PF_IN) { 2118 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2119 key.ext.port = pnl->dport; 2120 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2121 key.gwy.port = pnl->sport; 2122 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2123 } else { 2124 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2125 key.lan.port = pnl->dport; 2126 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2127 key.ext.port = pnl->sport; 2128 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2129 } 2130 if (m > 1) 2131 error = E2BIG; /* more than one state */ 2132 else if (state != NULL) { 2133 if (direction == PF_IN) { 2134 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 2135 state->af); 2136 pnl->rsport = state->lan.port; 2137 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2138 pnl->af); 2139 pnl->rdport = pnl->dport; 2140 } else { 2141 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 2142 state->af); 2143 pnl->rdport = state->gwy.port; 2144 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2145 pnl->af); 2146 pnl->rsport = pnl->sport; 2147 } 2148 } else 2149 error = ENOENT; 2150 } 2151 break; 2152 } 2153 2154 case DIOCSETTIMEOUT: { 2155 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2156 int old; 2157 2158 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2159 pt->seconds < 0) { 2160 error = EINVAL; 2161 goto fail; 2162 } 2163 old = pf_default_rule.timeout[pt->timeout]; 2164 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2165 pt->seconds = old; 2166 break; 2167 } 2168 2169 case DIOCGETTIMEOUT: { 2170 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2171 2172 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2173 error = EINVAL; 2174 goto fail; 2175 } 2176 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2177 break; 2178 } 2179 2180 case DIOCGETLIMIT: { 2181 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2182 2183 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2184 error = EINVAL; 2185 goto fail; 2186 } 2187 pl->limit = pf_pool_limits[pl->index].limit; 2188 break; 2189 } 2190 2191 case 
DIOCSETLIMIT: { 2192 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2193 int old_limit; 2194 2195 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2196 pf_pool_limits[pl->index].pp == NULL) { 2197 error = EINVAL; 2198 goto fail; 2199 } 2200#ifdef __FreeBSD__ 2201 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 2202#else 2203 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2204 pl->limit, NULL, 0) != 0) { 2205 error = EBUSY; 2206 goto fail; 2207 } 2208#endif 2209 old_limit = pf_pool_limits[pl->index].limit; 2210 pf_pool_limits[pl->index].limit = pl->limit; 2211 pl->limit = old_limit; 2212 break; 2213 } 2214 2215 case DIOCSETDEBUG: { 2216 u_int32_t *level = (u_int32_t *)addr; 2217 2218 pf_status.debug = *level; 2219 break; 2220 } 2221 2222 case DIOCCLRRULECTRS: { 2223 struct pf_ruleset *ruleset = &pf_main_ruleset; 2224 struct pf_rule *rule; 2225 2226 TAILQ_FOREACH(rule, 2227 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) 2228 rule->evaluations = rule->packets = 2229 rule->bytes = 0; 2230 break; 2231 } 2232 2233#ifdef __FreeBSD__ 2234 case DIOCGIFSPEED: { 2235 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2236 struct pf_ifspeed ps; 2237 struct ifnet *ifp; 2238 2239 if (psp->ifname[0] != 0) { 2240 /* Can we completely trust user-land? 
*/ 2241 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2242 ifp = ifunit(ps.ifname); 2243 if (ifp != NULL) 2244 psp->baudrate = ifp->if_baudrate; 2245 else 2246 error = EINVAL; 2247 } else 2248 error = EINVAL; 2249 break; 2250 } 2251#endif /* __FreeBSD__ */ 2252 2253#ifdef ALTQ 2254 case DIOCSTARTALTQ: { 2255 struct pf_altq *altq; 2256 2257 /* enable all altq interfaces on active list */ 2258 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2259 if (altq->qname[0] == 0) { 2260 error = pf_enable_altq(altq); 2261 if (error != 0) 2262 break; 2263 } 2264 } 2265 if (error == 0) 2266 pf_altq_running = 1; 2267 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2268 break; 2269 } 2270 2271 case DIOCSTOPALTQ: { 2272 struct pf_altq *altq; 2273 2274 /* disable all altq interfaces on active list */ 2275 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2276 if (altq->qname[0] == 0) { 2277 error = pf_disable_altq(altq); 2278 if (error != 0) 2279 break; 2280 } 2281 } 2282 if (error == 0) 2283 pf_altq_running = 0; 2284 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2285 break; 2286 } 2287 2288 case DIOCADDALTQ: { 2289 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2290 struct pf_altq *altq, *a; 2291 2292 if (pa->ticket != ticket_altqs_inactive) { 2293 error = EBUSY; 2294 break; 2295 } 2296 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2297 if (altq == NULL) { 2298 error = ENOMEM; 2299 break; 2300 } 2301 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2302 2303 /* 2304 * if this is for a queue, find the discipline and 2305 * copy the necessary fields 2306 */ 2307 if (altq->qname[0] != 0) { 2308 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2309 error = EBUSY; 2310 pool_put(&pf_altq_pl, altq); 2311 break; 2312 } 2313 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2314 if (strncmp(a->ifname, altq->ifname, 2315 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2316 altq->altq_disc = a->altq_disc; 2317 break; 2318 } 2319 } 2320 } 2321 2322#ifdef __FreeBSD__ 2323 PF_UNLOCK(); 2324#endif 2325 error = 
altq_add(altq); 2326#ifdef __FreeBSD__ 2327 PF_LOCK(); 2328#endif 2329 if (error) { 2330 pool_put(&pf_altq_pl, altq); 2331 break; 2332 } 2333 2334 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2335 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2336 break; 2337 } 2338 2339 case DIOCGETALTQS: { 2340 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2341 struct pf_altq *altq; 2342 2343 pa->nr = 0; 2344 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2345 pa->nr++; 2346 pa->ticket = ticket_altqs_active; 2347 break; 2348 } 2349 2350 case DIOCGETALTQ: { 2351 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2352 struct pf_altq *altq; 2353 u_int32_t nr; 2354 2355 if (pa->ticket != ticket_altqs_active) { 2356 error = EBUSY; 2357 break; 2358 } 2359 nr = 0; 2360 altq = TAILQ_FIRST(pf_altqs_active); 2361 while ((altq != NULL) && (nr < pa->nr)) { 2362 altq = TAILQ_NEXT(altq, entries); 2363 nr++; 2364 } 2365 if (altq == NULL) { 2366 error = EBUSY; 2367 break; 2368 } 2369 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2370 break; 2371 } 2372 2373 case DIOCCHANGEALTQ: 2374 /* CHANGEALTQ not supported yet! 
*/ 2375 error = ENODEV; 2376 break; 2377 2378 case DIOCGETQSTATS: { 2379 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2380 struct pf_altq *altq; 2381 u_int32_t nr; 2382 int nbytes; 2383 2384 if (pq->ticket != ticket_altqs_active) { 2385 error = EBUSY; 2386 break; 2387 } 2388 nbytes = pq->nbytes; 2389 nr = 0; 2390 altq = TAILQ_FIRST(pf_altqs_active); 2391 while ((altq != NULL) && (nr < pq->nr)) { 2392 altq = TAILQ_NEXT(altq, entries); 2393 nr++; 2394 } 2395 if (altq == NULL) { 2396 error = EBUSY; 2397 break; 2398 } 2399#ifdef __FreeBSD__ 2400 PF_UNLOCK(); 2401#endif 2402 error = altq_getqstats(altq, pq->buf, &nbytes); 2403#ifdef __FreeBSD__ 2404 PF_LOCK(); 2405#endif 2406 if (error == 0) { 2407 pq->scheduler = altq->scheduler; 2408 pq->nbytes = nbytes; 2409 } 2410 break; 2411 } 2412#endif /* ALTQ */ 2413 2414 case DIOCBEGINADDRS: { 2415 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2416 2417 pf_empty_pool(&pf_pabuf); 2418 pp->ticket = ++ticket_pabuf; 2419 break; 2420 } 2421 2422 case DIOCADDADDR: { 2423 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2424 2425 if (pp->ticket != ticket_pabuf) { 2426 error = EBUSY; 2427 break; 2428 } 2429#ifndef INET 2430 if (pp->af == AF_INET) { 2431 error = EAFNOSUPPORT; 2432 break; 2433 } 2434#endif /* INET */ 2435#ifndef INET6 2436 if (pp->af == AF_INET6) { 2437 error = EAFNOSUPPORT; 2438 break; 2439 } 2440#endif /* INET6 */ 2441 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2442 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2443 pp->addr.addr.type != PF_ADDR_TABLE) { 2444 error = EINVAL; 2445 break; 2446 } 2447 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2448 if (pa == NULL) { 2449 error = ENOMEM; 2450 break; 2451 } 2452 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2453 if (pa->ifname[0]) { 2454 pa->kif = pfi_attach_rule(pa->ifname); 2455 if (pa->kif == NULL) { 2456 pool_put(&pf_pooladdr_pl, pa); 2457 error = EINVAL; 2458 break; 2459 } 2460 } 2461 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2462 
pfi_dynaddr_remove(&pa->addr); 2463 pfi_detach_rule(pa->kif); 2464 pool_put(&pf_pooladdr_pl, pa); 2465 error = EINVAL; 2466 break; 2467 } 2468 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2469 break; 2470 } 2471 2472 case DIOCGETADDRS: { 2473 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2474 2475 pp->nr = 0; 2476 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2477 pp->r_num, 0, 1, 0); 2478 if (pool == NULL) { 2479 error = EBUSY; 2480 break; 2481 } 2482 TAILQ_FOREACH(pa, &pool->list, entries) 2483 pp->nr++; 2484 break; 2485 } 2486 2487 case DIOCGETADDR: { 2488 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2489 u_int32_t nr = 0; 2490 2491 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2492 pp->r_num, 0, 1, 1); 2493 if (pool == NULL) { 2494 error = EBUSY; 2495 break; 2496 } 2497 pa = TAILQ_FIRST(&pool->list); 2498 while ((pa != NULL) && (nr < pp->nr)) { 2499 pa = TAILQ_NEXT(pa, entries); 2500 nr++; 2501 } 2502 if (pa == NULL) { 2503 error = EBUSY; 2504 break; 2505 } 2506 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2507 pfi_dynaddr_copyout(&pp->addr.addr); 2508 pf_tbladdr_copyout(&pp->addr.addr); 2509 pf_rtlabel_copyout(&pp->addr.addr); 2510 break; 2511 } 2512 2513 case DIOCCHANGEADDR: { 2514 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2515 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2516 struct pf_ruleset *ruleset; 2517 2518 if (pca->action < PF_CHANGE_ADD_HEAD || 2519 pca->action > PF_CHANGE_REMOVE) { 2520 error = EINVAL; 2521 break; 2522 } 2523 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2524 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2525 pca->addr.addr.type != PF_ADDR_TABLE) { 2526 error = EINVAL; 2527 break; 2528 } 2529 2530 ruleset = pf_find_ruleset(pca->anchor); 2531 if (ruleset == NULL) { 2532 error = EBUSY; 2533 break; 2534 } 2535 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2536 pca->r_num, pca->r_last, 1, 1); 2537 if (pool == NULL) { 2538 error = EBUSY; 2539 break; 2540 
} 2541 if (pca->action != PF_CHANGE_REMOVE) { 2542 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2543 if (newpa == NULL) { 2544 error = ENOMEM; 2545 break; 2546 } 2547 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2548#ifndef INET 2549 if (pca->af == AF_INET) { 2550 pool_put(&pf_pooladdr_pl, newpa); 2551 error = EAFNOSUPPORT; 2552 break; 2553 } 2554#endif /* INET */ 2555#ifndef INET6 2556 if (pca->af == AF_INET6) { 2557 pool_put(&pf_pooladdr_pl, newpa); 2558 error = EAFNOSUPPORT; 2559 break; 2560 } 2561#endif /* INET6 */ 2562 if (newpa->ifname[0]) { 2563 newpa->kif = pfi_attach_rule(newpa->ifname); 2564 if (newpa->kif == NULL) { 2565 pool_put(&pf_pooladdr_pl, newpa); 2566 error = EINVAL; 2567 break; 2568 } 2569 } else 2570 newpa->kif = NULL; 2571 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2572 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2573 pfi_dynaddr_remove(&newpa->addr); 2574 pfi_detach_rule(newpa->kif); 2575 pool_put(&pf_pooladdr_pl, newpa); 2576 error = EINVAL; 2577 break; 2578 } 2579 } 2580 2581 if (pca->action == PF_CHANGE_ADD_HEAD) 2582 oldpa = TAILQ_FIRST(&pool->list); 2583 else if (pca->action == PF_CHANGE_ADD_TAIL) 2584 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2585 else { 2586 int i = 0; 2587 2588 oldpa = TAILQ_FIRST(&pool->list); 2589 while ((oldpa != NULL) && (i < pca->nr)) { 2590 oldpa = TAILQ_NEXT(oldpa, entries); 2591 i++; 2592 } 2593 if (oldpa == NULL) { 2594 error = EINVAL; 2595 break; 2596 } 2597 } 2598 2599 if (pca->action == PF_CHANGE_REMOVE) { 2600 TAILQ_REMOVE(&pool->list, oldpa, entries); 2601 pfi_dynaddr_remove(&oldpa->addr); 2602 pf_tbladdr_remove(&oldpa->addr); 2603 pfi_detach_rule(oldpa->kif); 2604 pool_put(&pf_pooladdr_pl, oldpa); 2605 } else { 2606 if (oldpa == NULL) 2607 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2608 else if (pca->action == PF_CHANGE_ADD_HEAD || 2609 pca->action == PF_CHANGE_ADD_BEFORE) 2610 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2611 else 2612 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2613 
newpa, entries); 2614 } 2615 2616 pool->cur = TAILQ_FIRST(&pool->list); 2617 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2618 pca->af); 2619 break; 2620 } 2621 2622 case DIOCGETRULESETS: { 2623 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2624 struct pf_ruleset *ruleset; 2625 struct pf_anchor *anchor; 2626 2627 pr->path[sizeof(pr->path) - 1] = 0; 2628 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2629 error = EINVAL; 2630 break; 2631 } 2632 pr->nr = 0; 2633 if (ruleset->anchor == NULL) { 2634 /* XXX kludge for pf_main_ruleset */ 2635 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2636 if (anchor->parent == NULL) 2637 pr->nr++; 2638 } else { 2639 RB_FOREACH(anchor, pf_anchor_node, 2640 &ruleset->anchor->children) 2641 pr->nr++; 2642 } 2643 break; 2644 } 2645 2646 case DIOCGETRULESET: { 2647 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2648 struct pf_ruleset *ruleset; 2649 struct pf_anchor *anchor; 2650 u_int32_t nr = 0; 2651 2652 pr->path[sizeof(pr->path) - 1] = 0; 2653 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2654 error = EINVAL; 2655 break; 2656 } 2657 pr->name[0] = 0; 2658 if (ruleset->anchor == NULL) { 2659 /* XXX kludge for pf_main_ruleset */ 2660 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2661 if (anchor->parent == NULL && nr++ == pr->nr) { 2662 strlcpy(pr->name, anchor->name, 2663 sizeof(pr->name)); 2664 break; 2665 } 2666 } else { 2667 RB_FOREACH(anchor, pf_anchor_node, 2668 &ruleset->anchor->children) 2669 if (nr++ == pr->nr) { 2670 strlcpy(pr->name, anchor->name, 2671 sizeof(pr->name)); 2672 break; 2673 } 2674 } 2675 if (!pr->name[0]) 2676 error = EBUSY; 2677 break; 2678 } 2679 2680 case DIOCRCLRTABLES: { 2681 struct pfioc_table *io = (struct pfioc_table *)addr; 2682 2683 if (io->pfrio_esize != 0) { 2684 error = ENODEV; 2685 break; 2686 } 2687 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2688 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2689 break; 2690 } 2691 2692 case DIOCRADDTABLES: { 
2693 struct pfioc_table *io = (struct pfioc_table *)addr; 2694 2695 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2696 error = ENODEV; 2697 break; 2698 } 2699 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2700 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2701 break; 2702 } 2703 2704 case DIOCRDELTABLES: { 2705 struct pfioc_table *io = (struct pfioc_table *)addr; 2706 2707 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2708 error = ENODEV; 2709 break; 2710 } 2711 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2712 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2713 break; 2714 } 2715 2716 case DIOCRGETTABLES: { 2717 struct pfioc_table *io = (struct pfioc_table *)addr; 2718 2719 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2720 error = ENODEV; 2721 break; 2722 } 2723 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2724 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2725 break; 2726 } 2727 2728 case DIOCRGETTSTATS: { 2729 struct pfioc_table *io = (struct pfioc_table *)addr; 2730 2731 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2732 error = ENODEV; 2733 break; 2734 } 2735 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2736 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2737 break; 2738 } 2739 2740 case DIOCRCLRTSTATS: { 2741 struct pfioc_table *io = (struct pfioc_table *)addr; 2742 2743 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2744 error = ENODEV; 2745 break; 2746 } 2747 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2748 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2749 break; 2750 } 2751 2752 case DIOCRSETTFLAGS: { 2753 struct pfioc_table *io = (struct pfioc_table *)addr; 2754 2755 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2756 error = ENODEV; 2757 break; 2758 } 2759 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2760 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2761 &io->pfrio_ndel, io->pfrio_flags | 
PFR_FLAG_USERIOCTL); 2762 break; 2763 } 2764 2765 case DIOCRCLRADDRS: { 2766 struct pfioc_table *io = (struct pfioc_table *)addr; 2767 2768 if (io->pfrio_esize != 0) { 2769 error = ENODEV; 2770 break; 2771 } 2772 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2773 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2774 break; 2775 } 2776 2777 case DIOCRADDADDRS: { 2778 struct pfioc_table *io = (struct pfioc_table *)addr; 2779 2780 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2781 error = ENODEV; 2782 break; 2783 } 2784 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2785 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2786 PFR_FLAG_USERIOCTL); 2787 break; 2788 } 2789 2790 case DIOCRDELADDRS: { 2791 struct pfioc_table *io = (struct pfioc_table *)addr; 2792 2793 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2794 error = ENODEV; 2795 break; 2796 } 2797 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2798 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2799 PFR_FLAG_USERIOCTL); 2800 break; 2801 } 2802 2803 case DIOCRSETADDRS: { 2804 struct pfioc_table *io = (struct pfioc_table *)addr; 2805 2806 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2807 error = ENODEV; 2808 break; 2809 } 2810 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2811 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2812 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2813 PFR_FLAG_USERIOCTL); 2814 break; 2815 } 2816 2817 case DIOCRGETADDRS: { 2818 struct pfioc_table *io = (struct pfioc_table *)addr; 2819 2820 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2821 error = ENODEV; 2822 break; 2823 } 2824 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2825 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2826 break; 2827 } 2828 2829 case DIOCRGETASTATS: { 2830 struct pfioc_table *io = (struct pfioc_table *)addr; 2831 2832 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2833 error = ENODEV; 2834 break; 2835 } 2836 error = 
pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2837 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2838 break; 2839 } 2840 2841 case DIOCRCLRASTATS: { 2842 struct pfioc_table *io = (struct pfioc_table *)addr; 2843 2844 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2845 error = ENODEV; 2846 break; 2847 } 2848 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2849 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2850 PFR_FLAG_USERIOCTL); 2851 break; 2852 } 2853 2854 case DIOCRTSTADDRS: { 2855 struct pfioc_table *io = (struct pfioc_table *)addr; 2856 2857 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2858 error = ENODEV; 2859 break; 2860 } 2861 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2862 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2863 PFR_FLAG_USERIOCTL); 2864 break; 2865 } 2866 2867 case DIOCRINADEFINE: { 2868 struct pfioc_table *io = (struct pfioc_table *)addr; 2869 2870 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2871 error = ENODEV; 2872 break; 2873 } 2874 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2875 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2876 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2877 break; 2878 } 2879 2880 case DIOCOSFPADD: { 2881 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2882 error = pf_osfp_add(io); 2883 break; 2884 } 2885 2886 case DIOCOSFPGET: { 2887 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2888 error = pf_osfp_get(io); 2889 break; 2890 } 2891 2892 case DIOCXBEGIN: { 2893 struct pfioc_trans *io = (struct pfioc_trans *) 2894 addr; 2895 static struct pfioc_trans_e ioe; 2896 static struct pfr_table table; 2897 int i; 2898 2899 if (io->esize != sizeof(ioe)) { 2900 error = ENODEV; 2901 goto fail; 2902 } 2903 for (i = 0; i < io->size; i++) { 2904#ifdef __FreeBSD__ 2905 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2906 if (error) { 2907#else 2908 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2909#endif 2910 error 
= EFAULT; 2911 goto fail; 2912 } 2913 switch (ioe.rs_num) { 2914#ifdef ALTQ 2915 case PF_RULESET_ALTQ: 2916 if (ioe.anchor[0]) { 2917 error = EINVAL; 2918 goto fail; 2919 } 2920 if ((error = pf_begin_altq(&ioe.ticket))) 2921 goto fail; 2922 break; 2923#endif /* ALTQ */ 2924 case PF_RULESET_TABLE: 2925 bzero(&table, sizeof(table)); 2926 strlcpy(table.pfrt_anchor, ioe.anchor, 2927 sizeof(table.pfrt_anchor)); 2928 if ((error = pfr_ina_begin(&table, 2929 &ioe.ticket, NULL, 0))) 2930 goto fail; 2931 break; 2932 default: 2933 if ((error = pf_begin_rules(&ioe.ticket, 2934 ioe.rs_num, ioe.anchor))) 2935 goto fail; 2936 break; 2937 } 2938#ifdef __FreeBSD__ 2939 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]), 2940 error); 2941 if (error) { 2942#else 2943 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2944#endif 2945 error = EFAULT; 2946 goto fail; 2947 } 2948 } 2949 break; 2950 } 2951 2952 case DIOCXROLLBACK: { 2953 struct pfioc_trans *io = (struct pfioc_trans *) 2954 addr; 2955 static struct pfioc_trans_e ioe; 2956 static struct pfr_table table; 2957 int i; 2958 2959 if (io->esize != sizeof(ioe)) { 2960 error = ENODEV; 2961 goto fail; 2962 } 2963 for (i = 0; i < io->size; i++) { 2964#ifdef __FreeBSD__ 2965 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2966 if (error) { 2967#else 2968 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2969#endif 2970 error = EFAULT; 2971 goto fail; 2972 } 2973 switch (ioe.rs_num) { 2974#ifdef ALTQ 2975 case PF_RULESET_ALTQ: 2976 if (ioe.anchor[0]) { 2977 error = EINVAL; 2978 goto fail; 2979 } 2980 if ((error = pf_rollback_altq(ioe.ticket))) 2981 goto fail; /* really bad */ 2982 break; 2983#endif /* ALTQ */ 2984 case PF_RULESET_TABLE: 2985 bzero(&table, sizeof(table)); 2986 strlcpy(table.pfrt_anchor, ioe.anchor, 2987 sizeof(table.pfrt_anchor)); 2988 if ((error = pfr_ina_rollback(&table, 2989 ioe.ticket, NULL, 0))) 2990 goto fail; /* really bad */ 2991 break; 2992 default: 2993 if ((error = pf_rollback_rules(ioe.ticket, 2994 
ioe.rs_num, ioe.anchor))) 2995 goto fail; /* really bad */ 2996 break; 2997 } 2998 } 2999 break; 3000 } 3001 3002 case DIOCXCOMMIT: { 3003 struct pfioc_trans *io = (struct pfioc_trans *) 3004 addr; 3005 static struct pfioc_trans_e ioe; 3006 static struct pfr_table table; 3007 struct pf_ruleset *rs; 3008 int i; 3009 3010 if (io->esize != sizeof(ioe)) { 3011 error = ENODEV; 3012 goto fail; 3013 } 3014 /* first makes sure everything will succeed */ 3015 for (i = 0; i < io->size; i++) { 3016#ifdef __FreeBSD__ 3017 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3018 if (error) { 3019#else 3020 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3021#endif 3022 error = EFAULT; 3023 goto fail; 3024 } 3025 switch (ioe.rs_num) { 3026#ifdef ALTQ 3027 case PF_RULESET_ALTQ: 3028 if (ioe.anchor[0]) { 3029 error = EINVAL; 3030 goto fail; 3031 } 3032 if (!altqs_inactive_open || ioe.ticket != 3033 ticket_altqs_inactive) { 3034 error = EBUSY; 3035 goto fail; 3036 } 3037 break; 3038#endif /* ALTQ */ 3039 case PF_RULESET_TABLE: 3040 rs = pf_find_ruleset(ioe.anchor); 3041 if (rs == NULL || !rs->topen || ioe.ticket != 3042 rs->tticket) { 3043 error = EBUSY; 3044 goto fail; 3045 } 3046 break; 3047 default: 3048 if (ioe.rs_num < 0 || ioe.rs_num >= 3049 PF_RULESET_MAX) { 3050 error = EINVAL; 3051 goto fail; 3052 } 3053 rs = pf_find_ruleset(ioe.anchor); 3054 if (rs == NULL || 3055 !rs->rules[ioe.rs_num].inactive.open || 3056 rs->rules[ioe.rs_num].inactive.ticket != 3057 ioe.ticket) { 3058 error = EBUSY; 3059 goto fail; 3060 } 3061 break; 3062 } 3063 } 3064 /* now do the commit - no errors should happen here */ 3065 for (i = 0; i < io->size; i++) { 3066#ifdef __FreeBSD__ 3067 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3068 if (error) { 3069#else 3070 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3071#endif 3072 error = EFAULT; 3073 goto fail; 3074 } 3075 switch (ioe.rs_num) { 3076#ifdef ALTQ 3077 case PF_RULESET_ALTQ: 3078 if ((error = pf_commit_altq(ioe.ticket))) 3079 goto fail; /* 
really bad */ 3080 break; 3081#endif /* ALTQ */ 3082 case PF_RULESET_TABLE: 3083 bzero(&table, sizeof(table)); 3084 strlcpy(table.pfrt_anchor, ioe.anchor, 3085 sizeof(table.pfrt_anchor)); 3086 if ((error = pfr_ina_commit(&table, ioe.ticket, 3087 NULL, NULL, 0))) 3088 goto fail; /* really bad */ 3089 break; 3090 default: 3091 if ((error = pf_commit_rules(ioe.ticket, 3092 ioe.rs_num, ioe.anchor))) 3093 goto fail; /* really bad */ 3094 break; 3095 } 3096 } 3097 break; 3098 } 3099 3100 case DIOCGETSRCNODES: { 3101 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3102 struct pf_src_node *n; 3103 struct pf_src_node *p, pstore; 3104 u_int32_t nr = 0; 3105 int space = psn->psn_len; 3106 3107 if (space == 0) { 3108 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3109 nr++; 3110 psn->psn_len = sizeof(struct pf_src_node) * nr; 3111 break; 3112 } 3113 3114 p = psn->psn_src_nodes; 3115 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3116 int secs = time_second, diff; 3117 3118 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3119 break; 3120 3121 bcopy(n, &pstore, sizeof(pstore)); 3122 if (n->rule.ptr != NULL) 3123 pstore.rule.nr = n->rule.ptr->nr; 3124 pstore.creation = secs - pstore.creation; 3125 if (pstore.expire > secs) 3126 pstore.expire -= secs; 3127 else 3128 pstore.expire = 0; 3129 3130 /* adjust the connection rate estimate */ 3131 diff = secs - n->conn_rate.last; 3132 if (diff >= n->conn_rate.seconds) 3133 pstore.conn_rate.count = 0; 3134 else 3135 pstore.conn_rate.count -= 3136 n->conn_rate.count * diff / 3137 n->conn_rate.seconds; 3138 3139#ifdef __FreeBSD__ 3140 PF_COPYOUT(&pstore, p, sizeof(*p), error); 3141#else 3142 error = copyout(&pstore, p, sizeof(*p)); 3143#endif 3144 if (error) 3145 goto fail; 3146 p++; 3147 nr++; 3148 } 3149 psn->psn_len = sizeof(struct pf_src_node) * nr; 3150 break; 3151 } 3152 3153 case DIOCCLRSRCNODES: { 3154 struct pf_src_node *n; 3155 struct pf_state *state; 3156 3157 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 
3158 state->src_node = NULL; 3159 state->nat_src_node = NULL; 3160 } 3161 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3162 n->expire = 1; 3163 n->states = 0; 3164 } 3165 pf_purge_expired_src_nodes(); 3166 pf_status.src_nodes = 0; 3167 break; 3168 } 3169 3170 case DIOCSETHOSTID: { 3171 u_int32_t *hostid = (u_int32_t *)addr; 3172 3173 if (*hostid == 0) 3174 pf_status.hostid = arc4random(); 3175 else 3176 pf_status.hostid = *hostid; 3177 break; 3178 } 3179 3180 case DIOCOSFPFLUSH: 3181 pf_osfp_flush(); 3182 break; 3183 3184 case DIOCIGETIFACES: { 3185 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3186 3187 if (io->pfiio_esize != sizeof(struct pfi_if)) { 3188 error = ENODEV; 3189 break; 3190 } 3191 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3192 &io->pfiio_size, io->pfiio_flags); 3193 break; 3194 } 3195 3196 case DIOCICLRISTATS: { 3197 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3198 3199 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero, 3200 io->pfiio_flags); 3201 break; 3202 } 3203 3204 case DIOCSETIFFLAG: { 3205 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3206 3207 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3208 break; 3209 } 3210 3211 case DIOCCLRIFFLAG: { 3212 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3213 3214 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3215 break; 3216 } 3217 3218 default: 3219 error = ENODEV; 3220 break; 3221 } 3222fail: 3223#ifdef __FreeBSD__ 3224 PF_UNLOCK(); 3225#else 3226 splx(s); 3227#endif 3228 return (error); 3229} 3230 3231#ifdef __FreeBSD__ 3232/* 3233 * XXX - Check for version missmatch!!! 
 */
/*
 * Expire all states.  Every state in the id tree is marked for
 * immediate purge and the purge pass is run; the global state
 * counter is reset afterwards.
 */
static void
pf_clear_states(void)
{
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
	pf_status.states = 0;
#if 0	/* NPFSYNC */
	/*
	 * XXX This is called on module unload; we do not want to sync
	 * that over?
	 * NOTE(review): dead code -- psk is not in scope in this
	 * function, so this call would not compile if the #if 0 were
	 * removed as-is.
	 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Remove all tables, performing the equivalent of a DIOCRCLRTABLES
 * ioctl internally.  Returns the pfr_clr_tables() error code.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Detach every state from its source node(s) and expire all source
 * nodes, mirroring the DIOCCLRSRCNODES ioctl case above.
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;		/* expire on the next purge pass */
		n->states = 0;
	}
	pf_purge_expired_src_nodes();
	pf_status.src_nodes = 0;
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	callout_stop(&pf_expire_to);

	pf_status.running = 0;
	do {
		/*
		 * Flush each ruleset by opening and immediately
		 * committing an empty inactive transaction on it.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* flush the ALTQ configuration the same way */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for AF_INET, registered in hook_pf().  Frees and
 * NULLs the mbuf when pf_test() rejects the packet.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on
	 * ip_len/ip_off in its network stack.  It used to convert them
	 * to host byte order first, as FreeBSD does; this is no longer
	 * true, so we convert to network byte order around the
	 * pf_test() call and back afterwards.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET; same byte-order handling as
 * pf_check_in(), plus delayed-checksum completion before pf sees the
 * packet.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on
	 * ip_len/ip_off in its network stack.  It used to convert them
	 * to host byte order first, as FreeBSD does; this is no longer
	 * true, so we convert to network byte order around the
	 * pf_test() call and back afterwards.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for AF_INET6.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses.  In order to support
	 * stateful filtering we have changed this to lo0, as is the
	 * case for IPv4.
	 */
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? &loif[0] : ifp, m,
	    NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET6.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf_check*() functions with the pfil(9) framework for
 * AF_INET (and AF_INET6 when compiled in).  Idempotent via the
 * pf_pfil_hooked flag; unwinds the INET hooks if the INET6 head is
 * missing.  Must be called without the pf lock held.
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* roll back the INET hooks registered above */
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister all pfil(9) hooks installed by hook_pf().  Idempotent
 * via the pf_pfil_hooked flag; must be called without the pf lock
 * held.
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: set up zones and the pf mutex, create /dev/pf
 * and attach pf; tears down the device and mutex again on attach
 * failure.
 */
static int
pf_load(void)
{
	init_zone_var();
	init_pf_mutex();
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pfattach() < 0) {
		destroy_dev(pf_dev);
		destroy_pf_mutex();
		return (ENOMEM);
	}
	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, unhook from pfil(9), then flush the
 * whole configuration (shutdown_pf) and release all resources.  The
 * pfil unhook must happen before teardown, and fails the unload if
 * it does not succeed.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	pf_status.running = 0;
	PF_UNLOCK();
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_dev(pf_dev);
	destroy_pf_mutex();
	return error;
}

/*
 * module(9) event handler dispatching load/unload to pf_load() and
 * pf_unload().
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */