/* pf_ioctl.c revision 222529 */
1/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */ 2 3/* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
35 * 36 */ 37 38#ifdef __FreeBSD__ 39#include <sys/cdefs.h> 40__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 222529 2011-05-31 15:05:29Z bz $"); 41 42#include "opt_inet.h" 43#include "opt_inet6.h" 44#include "opt_bpf.h" 45#include "opt_pf.h" 46 47#ifdef DEV_BPF 48#define NBPFILTER DEV_BPF 49#else 50#define NBPFILTER 0 51#endif 52 53#ifdef DEV_PFLOG 54#define NPFLOG DEV_PFLOG 55#else 56#define NPFLOG 0 57#endif 58 59#ifdef DEV_PFSYNC 60#define NPFSYNC DEV_PFSYNC 61#else 62#define NPFSYNC 0 63#endif 64 65#else 66#include "bpfilter.h" 67#include "pflog.h" 68#include "pfsync.h" 69#endif 70 71#include <sys/param.h> 72#include <sys/systm.h> 73#include <sys/mbuf.h> 74#include <sys/filio.h> 75#include <sys/fcntl.h> 76#include <sys/socket.h> 77#include <sys/socketvar.h> 78#include <sys/kernel.h> 79#include <sys/time.h> 80#include <sys/malloc.h> 81#ifdef __FreeBSD__ 82#include <sys/module.h> 83#include <sys/conf.h> 84#include <sys/proc.h> 85#include <sys/sysctl.h> 86#else 87#include <sys/timeout.h> 88#include <sys/pool.h> 89#endif 90#include <sys/proc.h> 91#include <sys/malloc.h> 92#include <sys/kthread.h> 93#ifndef __FreeBSD__ 94#include <sys/rwlock.h> 95#include <uvm/uvm_extern.h> 96#endif 97 98#include <net/if.h> 99#include <net/if_types.h> 100#ifdef __FreeBSD__ 101#include <net/vnet.h> 102#endif 103 104#include <netinet/in.h> 105#include <netinet/in_var.h> 106#include <netinet/in_systm.h> 107#include <netinet/ip.h> 108#include <netinet/ip_var.h> 109#include <netinet/ip_icmp.h> 110 111#ifdef __FreeBSD__ 112#include <sys/md5.h> 113#else 114#include <dev/rndvar.h> 115#include <crypto/md5.h> 116#endif 117#include <net/pfvar.h> 118 119#if NPFSYNC > 0 120#include <net/if_pfsync.h> 121#endif /* NPFSYNC > 0 */ 122 123#include <net/if_pflog.h> 124 125#ifdef INET6 126#include <netinet/ip6.h> 127#include <netinet/in_pcb.h> 128#endif /* INET6 */ 129 130#ifdef ALTQ 131#include <altq/altq.h> 132#endif 133 134#ifdef __FreeBSD__ 135#include <sys/limits.h> 136#include 
<sys/lock.h> 137#include <sys/mutex.h> 138#include <net/pfil.h> 139#endif /* __FreeBSD__ */ 140 141#ifdef __FreeBSD__ 142void init_zone_var(void); 143void cleanup_pf_zone(void); 144int pfattach(void); 145#else 146void pfattach(int); 147void pf_thread_create(void *); 148int pfopen(dev_t, int, int, struct proc *); 149int pfclose(dev_t, int, int, struct proc *); 150#endif 151struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 152 u_int8_t, u_int8_t, u_int8_t); 153 154void pf_mv_pool(struct pf_palist *, struct pf_palist *); 155void pf_empty_pool(struct pf_palist *); 156#ifdef __FreeBSD__ 157int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 158#else 159int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 160#endif 161#ifdef ALTQ 162int pf_begin_altq(u_int32_t *); 163int pf_rollback_altq(u_int32_t); 164int pf_commit_altq(u_int32_t); 165int pf_enable_altq(struct pf_altq *); 166int pf_disable_altq(struct pf_altq *); 167#endif /* ALTQ */ 168int pf_begin_rules(u_int32_t *, int, const char *); 169int pf_rollback_rules(u_int32_t, int, char *); 170int pf_setup_pfsync_matching(struct pf_ruleset *); 171void pf_hash_rule(MD5_CTX *, struct pf_rule *); 172void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 173int pf_commit_rules(u_int32_t, int, char *); 174 175struct pf_rule pf_default_rule; 176#ifdef __FreeBSD__ 177struct sx pf_consistency_lock; 178SX_SYSINIT(pf_consistency_lock, &pf_consistency_lock, "pf_statetbl_lock"); 179#else 180struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER; 181#endif 182#ifdef ALTQ 183static int pf_altq_running; 184#endif 185 186#define TAGID_MAX 50000 187TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 188 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 189 190#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 191#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 192#endif 193u_int16_t tagname2tag(struct pf_tags *, char *); 194void tag2tagname(struct pf_tags *, u_int16_t, char *); 
void	tag_unref(struct pf_tags *, u_int16_t);
int	pf_rtlabel_add(struct pf_addr_wrap *);
void	pf_rtlabel_remove(struct pf_addr_wrap *);
void	pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf, emitted only when the global pf debug level is >= n. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x


#ifdef __FreeBSD__
static struct cdev	*pf_dev;

/*
 * XXX - These are new and need to be checked when moveing to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(void);
/*
 * XXX - These are new and need to be checked when moveing to a new version
 */

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

/* Character-device switch: pf is driven entirely through ioctl(2). */
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

/* Nonzero while the pfil(9) input/output hooks are installed. */
static volatile int pf_pfil_hooked = 0;
/* Set to ask the purge kthread to exit. */
int pf_end_threads = 0;
struct mtx pf_task_mtx;
/* Indirect pointer to pflog(4)'s packet logger; NULL when pflog not loaded. */
pflog_packet_t *pflog_packet_ptr = NULL;

int debug_pfugidhack = 0;
SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
    "Enable/disable pf user/group rules mpsafe hack");

/*
 * Create the global pf task mutex (FreeBSD module load path).
 */
void
init_pf_mutex(void)
{
	mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
}

/*
 * Destroy the global pf task mutex (FreeBSD module unload path).
 */
void
destroy_pf_mutex(void)
{
	mtx_destroy(&pf_task_mtx);
}

/*
 * Reset every UMA zone pointer to NULL so that a partially failed
 * pfattach() can be cleaned up without freeing garbage pointers.
 * NOTE(review): pfr_kentry_pl2 and pfi_addr_pl are not reset here
 * although cleanup_pf_zone() destroys them — presumably zeroed
 * elsewhere; confirm against the module init path.
 */
void
init_zone_var(void)
{
	pf_src_tree_pl = pf_rule_pl = NULL;
	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
	pf_state_scrub_pl = NULL;
	pfr_ktable_pl = pfr_kentry_pl = NULL;
}

/*
 * Destroy all UMA zones created by pfattach().  UMA_DESTROY is expected
 * to tolerate NULL zones, so this is safe after a partial attach.
 */
void
cleanup_pf_zone(void)
{
	UMA_DESTROY(pf_src_tree_pl);
	UMA_DESTROY(pf_rule_pl);
	UMA_DESTROY(pf_state_pl);
	UMA_DESTROY(pf_altq_pl);
	UMA_DESTROY(pf_pooladdr_pl);
	UMA_DESTROY(pf_frent_pl);
	UMA_DESTROY(pf_frag_pl);
	UMA_DESTROY(pf_cache_pl);
	UMA_DESTROY(pf_cent_pl);
	UMA_DESTROY(pfr_ktable_pl);
	UMA_DESTROY(pfr_kentry_pl2);
	UMA_DESTROY(pfr_kentry_pl);
	UMA_DESTROY(pf_state_scrub_pl);
	UMA_DESTROY(pfi_addr_pl);
}

/*
 * FreeBSD attach: create all UMA zones, initialize the table/interface/
 * OS-fingerprint subsystems, install pool limits, set up the default
 * rule and timeouts, and start the state-purge kthread.
 * Returns 0 on success or an errno value on failure.
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	/* do { } while (0) so UMA_CREATE failure paths can break out. */
	do {
		UMA_CREATE(pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
		UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
		UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
		UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
		UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
		UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
		UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
		UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
		UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
		UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while (0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	if ((error = pf_osfp_initialize())) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* Wire the default resource limits to their backing zones. */
	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
	pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	pf_pool_limits[PF_LIMIT_TABLES].pp = pfr_ktable_pl;
	pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = pfr_kentry_pl;
	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
	uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	if (kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pfpurge"))
		return (ENXIO);

	return (error);
}
#else /* !__FreeBSD__ */
/*
 * OpenBSD attach: same initialization as the FreeBSD path but using
 * pool(9) allocators, and the purge thread is created deferred because
 * process context is required.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Shrink the table-entry limit on machines with <= 100MB of RAM. */
	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kproc_create_deferred(pf_thread_create, NULL);
}

/*
 * Deferred-creation callback: start the state-purge kthread or panic,
 * since pf cannot operate without it.
 */
void
pf_thread_create(void *v)
{
	if (kproc_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}

/*
 * open(2) on /dev/pf: only unit 0 exists.
 */
int
pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
{
	if (dev2unit(dev) >= 1)
		return (ENXIO);
	return (0);
}
485 486int 487pfclose(struct cdev *dev, int flags, int fmt, struct proc *p) 488{ 489 if (dev2unit(dev) >= 1) 490 return (ENXIO); 491 return (0); 492} 493#endif /* __FreeBSD__ */ 494 495struct pf_pool * 496pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, 497 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 498 u_int8_t check_ticket) 499{ 500 struct pf_ruleset *ruleset; 501 struct pf_rule *rule; 502 int rs_num; 503 504 ruleset = pf_find_ruleset(anchor); 505 if (ruleset == NULL) 506 return (NULL); 507 rs_num = pf_get_ruleset_number(rule_action); 508 if (rs_num >= PF_RULESET_MAX) 509 return (NULL); 510 if (active) { 511 if (check_ticket && ticket != 512 ruleset->rules[rs_num].active.ticket) 513 return (NULL); 514 if (r_last) 515 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 516 pf_rulequeue); 517 else 518 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 519 } else { 520 if (check_ticket && ticket != 521 ruleset->rules[rs_num].inactive.ticket) 522 return (NULL); 523 if (r_last) 524 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 525 pf_rulequeue); 526 else 527 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 528 } 529 if (!r_last) { 530 while ((rule != NULL) && (rule->nr != rule_number)) 531 rule = TAILQ_NEXT(rule, entries); 532 } 533 if (rule == NULL) 534 return (NULL); 535 536 return (&rule->rpool); 537} 538 539void 540pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 541{ 542 struct pf_pooladdr *mv_pool_pa; 543 544 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 545 TAILQ_REMOVE(poola, mv_pool_pa, entries); 546 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 547 } 548} 549 550void 551pf_empty_pool(struct pf_palist *poola) 552{ 553 struct pf_pooladdr *empty_pool_pa; 554 555 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 556 pfi_dynaddr_remove(&empty_pool_pa->addr); 557 pf_tbladdr_remove(&empty_pool_pa->addr); 558 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE); 559 TAILQ_REMOVE(poola, 
empty_pool_pa, entries); 560 pool_put(&pf_pooladdr_pl, empty_pool_pa); 561 } 562} 563 564void 565pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 566{ 567 if (rulequeue != NULL) { 568 if (rule->states <= 0) { 569 /* 570 * XXX - we need to remove the table *before* detaching 571 * the rule to make sure the table code does not delete 572 * the anchor under our feet. 573 */ 574 pf_tbladdr_remove(&rule->src.addr); 575 pf_tbladdr_remove(&rule->dst.addr); 576 if (rule->overload_tbl) 577 pfr_detach_table(rule->overload_tbl); 578 } 579 TAILQ_REMOVE(rulequeue, rule, entries); 580 rule->entries.tqe_prev = NULL; 581 rule->nr = -1; 582 } 583 584 if (rule->states > 0 || rule->src_nodes > 0 || 585 rule->entries.tqe_prev != NULL) 586 return; 587 pf_tag_unref(rule->tag); 588 pf_tag_unref(rule->match_tag); 589#ifdef ALTQ 590 if (rule->pqid != rule->qid) 591 pf_qid_unref(rule->pqid); 592 pf_qid_unref(rule->qid); 593#endif 594 pf_rtlabel_remove(&rule->src.addr); 595 pf_rtlabel_remove(&rule->dst.addr); 596 pfi_dynaddr_remove(&rule->src.addr); 597 pfi_dynaddr_remove(&rule->dst.addr); 598 if (rulequeue == NULL) { 599 pf_tbladdr_remove(&rule->src.addr); 600 pf_tbladdr_remove(&rule->dst.addr); 601 if (rule->overload_tbl) 602 pfr_detach_table(rule->overload_tbl); 603 } 604 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); 605 pf_anchor_remove(rule); 606 pf_empty_pool(&rule->rpool.list); 607 pool_put(&pf_rule_pl, rule); 608} 609 610u_int16_t 611tagname2tag(struct pf_tags *head, char *tagname) 612{ 613 struct pf_tagname *tag, *p = NULL; 614 u_int16_t new_tagid = 1; 615 616 TAILQ_FOREACH(tag, head, entries) 617 if (strcmp(tagname, tag->name) == 0) { 618 tag->ref++; 619 return (tag->tag); 620 } 621 622 /* 623 * to avoid fragmentation, we do a linear search from the beginning 624 * and take the first free slot we find. if there is none or the list 625 * is empty, append a new entry at the end. 
626 */ 627 628 /* new entry */ 629 if (!TAILQ_EMPTY(head)) 630 for (p = TAILQ_FIRST(head); p != NULL && 631 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 632 new_tagid = p->tag + 1; 633 634 if (new_tagid > TAGID_MAX) 635 return (0); 636 637 /* allocate and fill new struct pf_tagname */ 638 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname), 639 M_TEMP, M_NOWAIT); 640 if (tag == NULL) 641 return (0); 642 bzero(tag, sizeof(struct pf_tagname)); 643 strlcpy(tag->name, tagname, sizeof(tag->name)); 644 tag->tag = new_tagid; 645 tag->ref++; 646 647 if (p != NULL) /* insert new entry before p */ 648 TAILQ_INSERT_BEFORE(p, tag, entries); 649 else /* either list empty or no free slot in between */ 650 TAILQ_INSERT_TAIL(head, tag, entries); 651 652 return (tag->tag); 653} 654 655void 656tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 657{ 658 struct pf_tagname *tag; 659 660 TAILQ_FOREACH(tag, head, entries) 661 if (tag->tag == tagid) { 662 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 663 return; 664 } 665} 666 667void 668tag_unref(struct pf_tags *head, u_int16_t tag) 669{ 670 struct pf_tagname *p, *next; 671 672 if (tag == 0) 673 return; 674 675 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 676 next = TAILQ_NEXT(p, entries); 677 if (tag == p->tag) { 678 if (--p->ref == 0) { 679 TAILQ_REMOVE(head, p, entries); 680 free(p, M_TEMP); 681 } 682 break; 683 } 684 } 685} 686 687u_int16_t 688pf_tagname2tag(char *tagname) 689{ 690 return (tagname2tag(&pf_tags, tagname)); 691} 692 693void 694pf_tag2tagname(u_int16_t tagid, char *p) 695{ 696 tag2tagname(&pf_tags, tagid, p); 697} 698 699void 700pf_tag_ref(u_int16_t tag) 701{ 702 struct pf_tagname *t; 703 704 TAILQ_FOREACH(t, &pf_tags, entries) 705 if (t->tag == tag) 706 break; 707 if (t != NULL) 708 t->ref++; 709} 710 711void 712pf_tag_unref(u_int16_t tag) 713{ 714 tag_unref(&pf_tags, tag); 715} 716 717int 718pf_rtlabel_add(struct pf_addr_wrap *a) 719{ 720#ifdef __FreeBSD__ 721 /* XXX_IMPORT: later */ 722 return 
(0); 723#else 724 if (a->type == PF_ADDR_RTLABEL && 725 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 726 return (-1); 727 return (0); 728#endif 729} 730 731void 732pf_rtlabel_remove(struct pf_addr_wrap *a) 733{ 734#ifdef __FreeBSD__ 735 /* XXX_IMPORT: later */ 736#else 737 if (a->type == PF_ADDR_RTLABEL) 738 rtlabel_unref(a->v.rtlabel); 739#endif 740} 741 742void 743pf_rtlabel_copyout(struct pf_addr_wrap *a) 744{ 745#ifdef __FreeBSD__ 746 /* XXX_IMPORT: later */ 747 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) 748 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname)); 749#else 750 const char *name; 751 752 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 753 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL) 754 strlcpy(a->v.rtlabelname, "?", 755 sizeof(a->v.rtlabelname)); 756 else 757 strlcpy(a->v.rtlabelname, name, 758 sizeof(a->v.rtlabelname)); 759 } 760#endif 761} 762 763#ifdef ALTQ 764u_int32_t 765pf_qname2qid(char *qname) 766{ 767 return ((u_int32_t)tagname2tag(&pf_qids, qname)); 768} 769 770void 771pf_qid2qname(u_int32_t qid, char *p) 772{ 773 tag2tagname(&pf_qids, (u_int16_t)qid, p); 774} 775 776void 777pf_qid_unref(u_int32_t qid) 778{ 779 tag_unref(&pf_qids, (u_int16_t)qid); 780} 781 782int 783pf_begin_altq(u_int32_t *ticket) 784{ 785 struct pf_altq *altq; 786 int error = 0; 787 788 /* Purge the old altq list */ 789 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 790 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 791#ifdef __FreeBSD__ 792 if (altq->qname[0] == 0 && 793 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 794#else 795 if (altq->qname[0] == 0) { 796#endif 797 /* detach and destroy the discipline */ 798 error = altq_remove(altq); 799 } else 800 pf_qid_unref(altq->qid); 801 pool_put(&pf_altq_pl, altq); 802 } 803 if (error) 804 return (error); 805 *ticket = ++ticket_altqs_inactive; 806 altqs_inactive_open = 1; 807 return (0); 808} 809 810int 811pf_rollback_altq(u_int32_t ticket) 812{ 813 struct pf_altq *altq; 814 
int error = 0; 815 816 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 817 return (0); 818 /* Purge the old altq list */ 819 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 820 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 821#ifdef __FreeBSD__ 822 if (altq->qname[0] == 0 && 823 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 824#else 825 if (altq->qname[0] == 0) { 826#endif 827 /* detach and destroy the discipline */ 828 error = altq_remove(altq); 829 } else 830 pf_qid_unref(altq->qid); 831 pool_put(&pf_altq_pl, altq); 832 } 833 altqs_inactive_open = 0; 834 return (error); 835} 836 837int 838pf_commit_altq(u_int32_t ticket) 839{ 840 struct pf_altqqueue *old_altqs; 841 struct pf_altq *altq; 842 int s, err, error = 0; 843 844 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 845 return (EBUSY); 846 847 /* swap altqs, keep the old. */ 848 s = splsoftnet(); 849 old_altqs = pf_altqs_active; 850 pf_altqs_active = pf_altqs_inactive; 851 pf_altqs_inactive = old_altqs; 852 ticket_altqs_active = ticket_altqs_inactive; 853 854 /* Attach new disciplines */ 855 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 856#ifdef __FreeBSD__ 857 if (altq->qname[0] == 0 && 858 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 859#else 860 if (altq->qname[0] == 0) { 861#endif 862 /* attach the discipline */ 863 error = altq_pfattach(altq); 864 if (error == 0 && pf_altq_running) 865 error = pf_enable_altq(altq); 866 if (error != 0) { 867 splx(s); 868 return (error); 869 } 870 } 871 } 872 873 /* Purge the old altq list */ 874 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 875 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 876#ifdef __FreeBSD__ 877 if (altq->qname[0] == 0 && 878 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 879#else 880 if (altq->qname[0] == 0) { 881#endif 882 /* detach and destroy the discipline */ 883 if (pf_altq_running) 884 error = pf_disable_altq(altq); 885 err = altq_pfdetach(altq); 886 if (err != 0 && error 
== 0) 887 error = err; 888 err = altq_remove(altq); 889 if (err != 0 && error == 0) 890 error = err; 891 } else 892 pf_qid_unref(altq->qid); 893 pool_put(&pf_altq_pl, altq); 894 } 895 splx(s); 896 897 altqs_inactive_open = 0; 898 return (error); 899} 900 901int 902pf_enable_altq(struct pf_altq *altq) 903{ 904 struct ifnet *ifp; 905 struct tb_profile tb; 906 int s, error = 0; 907 908 if ((ifp = ifunit(altq->ifname)) == NULL) 909 return (EINVAL); 910 911 if (ifp->if_snd.altq_type != ALTQT_NONE) 912 error = altq_enable(&ifp->if_snd); 913 914 /* set tokenbucket regulator */ 915 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 916 tb.rate = altq->ifbandwidth; 917 tb.depth = altq->tbrsize; 918 s = splnet(); 919#ifdef __FreeBSD__ 920 PF_UNLOCK(); 921#endif 922 error = tbr_set(&ifp->if_snd, &tb); 923#ifdef __FreeBSD__ 924 PF_LOCK(); 925#endif 926 splx(s); 927 } 928 929 return (error); 930} 931 932int 933pf_disable_altq(struct pf_altq *altq) 934{ 935 struct ifnet *ifp; 936 struct tb_profile tb; 937 int s, error; 938 939 if ((ifp = ifunit(altq->ifname)) == NULL) 940 return (EINVAL); 941 942 /* 943 * when the discipline is no longer referenced, it was overridden 944 * by a new one. if so, just return. 
945 */ 946 if (altq->altq_disc != ifp->if_snd.altq_disc) 947 return (0); 948 949 error = altq_disable(&ifp->if_snd); 950 951 if (error == 0) { 952 /* clear tokenbucket regulator */ 953 tb.rate = 0; 954 s = splnet(); 955#ifdef __FreeBSD__ 956 PF_UNLOCK(); 957#endif 958 error = tbr_set(&ifp->if_snd, &tb); 959#ifdef __FreeBSD__ 960 PF_LOCK(); 961#endif 962 splx(s); 963 } 964 965 return (error); 966} 967 968#ifdef __FreeBSD__ 969void 970pf_altq_ifnet_event(struct ifnet *ifp, int remove) 971{ 972 struct ifnet *ifp1; 973 struct pf_altq *a1, *a2, *a3; 974 u_int32_t ticket; 975 int error = 0; 976 977 /* Interrupt userland queue modifications */ 978 if (altqs_inactive_open) 979 pf_rollback_altq(ticket_altqs_inactive); 980 981 /* Start new altq ruleset */ 982 if (pf_begin_altq(&ticket)) 983 return; 984 985 /* Copy the current active set */ 986 TAILQ_FOREACH(a1, pf_altqs_active, entries) { 987 a2 = pool_get(&pf_altq_pl, PR_NOWAIT); 988 if (a2 == NULL) { 989 error = ENOMEM; 990 break; 991 } 992 bcopy(a1, a2, sizeof(struct pf_altq)); 993 994 if (a2->qname[0] != 0) { 995 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 996 error = EBUSY; 997 pool_put(&pf_altq_pl, a2); 998 break; 999 } 1000 a2->altq_disc = NULL; 1001 TAILQ_FOREACH(a3, pf_altqs_inactive, entries) { 1002 if (strncmp(a3->ifname, a2->ifname, 1003 IFNAMSIZ) == 0 && a3->qname[0] == 0) { 1004 a2->altq_disc = a3->altq_disc; 1005 break; 1006 } 1007 } 1008 } 1009 /* Deactivate the interface in question */ 1010 a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; 1011 if ((ifp1 = ifunit(a2->ifname)) == NULL || 1012 (remove && ifp1 == ifp)) { 1013 a2->local_flags |= PFALTQ_FLAG_IF_REMOVED; 1014 } else { 1015 PF_UNLOCK(); 1016 error = altq_add(a2); 1017 PF_LOCK(); 1018 1019 if (ticket != ticket_altqs_inactive) 1020 error = EBUSY; 1021 1022 if (error) { 1023 pool_put(&pf_altq_pl, a2); 1024 break; 1025 } 1026 } 1027 1028 TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries); 1029 } 1030 1031 if (error != 0) 1032 pf_rollback_altq(ticket); 
1033 else 1034 pf_commit_altq(ticket); 1035} 1036#endif 1037#endif /* ALTQ */ 1038 1039int 1040pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1041{ 1042 struct pf_ruleset *rs; 1043 struct pf_rule *rule; 1044 1045 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1046 return (EINVAL); 1047 rs = pf_find_or_create_ruleset(anchor); 1048 if (rs == NULL) 1049 return (EINVAL); 1050 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1051 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 1052 rs->rules[rs_num].inactive.rcount--; 1053 } 1054 *ticket = ++rs->rules[rs_num].inactive.ticket; 1055 rs->rules[rs_num].inactive.open = 1; 1056 return (0); 1057} 1058 1059int 1060pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1061{ 1062 struct pf_ruleset *rs; 1063 struct pf_rule *rule; 1064 1065 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1066 return (EINVAL); 1067 rs = pf_find_ruleset(anchor); 1068 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1069 rs->rules[rs_num].inactive.ticket != ticket) 1070 return (0); 1071 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1072 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 1073 rs->rules[rs_num].inactive.rcount--; 1074 } 1075 rs->rules[rs_num].inactive.open = 0; 1076 return (0); 1077} 1078 1079#define PF_MD5_UPD(st, elm) \ 1080 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 1081 1082#define PF_MD5_UPD_STR(st, elm) \ 1083 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 1084 1085#define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 1086 (stor) = htonl((st)->elm); \ 1087 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 1088} while (0) 1089 1090#define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 1091 (stor) = htons((st)->elm); \ 1092 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 1093} while (0) 1094 1095void 1096pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 1097{ 1098 PF_MD5_UPD(pfr, addr.type); 1099 switch (pfr->addr.type) { 
1100 case PF_ADDR_DYNIFTL: 1101 PF_MD5_UPD(pfr, addr.v.ifname); 1102 PF_MD5_UPD(pfr, addr.iflags); 1103 break; 1104 case PF_ADDR_TABLE: 1105 PF_MD5_UPD(pfr, addr.v.tblname); 1106 break; 1107 case PF_ADDR_ADDRMASK: 1108 /* XXX ignore af? */ 1109 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 1110 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 1111 break; 1112 case PF_ADDR_RTLABEL: 1113 PF_MD5_UPD(pfr, addr.v.rtlabelname); 1114 break; 1115 } 1116 1117 PF_MD5_UPD(pfr, port[0]); 1118 PF_MD5_UPD(pfr, port[1]); 1119 PF_MD5_UPD(pfr, neg); 1120 PF_MD5_UPD(pfr, port_op); 1121} 1122 1123void 1124pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) 1125{ 1126 u_int16_t x; 1127 u_int32_t y; 1128 1129 pf_hash_rule_addr(ctx, &rule->src); 1130 pf_hash_rule_addr(ctx, &rule->dst); 1131 PF_MD5_UPD_STR(rule, label); 1132 PF_MD5_UPD_STR(rule, ifname); 1133 PF_MD5_UPD_STR(rule, match_tagname); 1134 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ 1135 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 1136 PF_MD5_UPD_HTONL(rule, prob, y); 1137 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 1138 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 1139 PF_MD5_UPD(rule, uid.op); 1140 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 1141 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 1142 PF_MD5_UPD(rule, gid.op); 1143 PF_MD5_UPD_HTONL(rule, rule_flag, y); 1144 PF_MD5_UPD(rule, action); 1145 PF_MD5_UPD(rule, direction); 1146 PF_MD5_UPD(rule, af); 1147 PF_MD5_UPD(rule, quick); 1148 PF_MD5_UPD(rule, ifnot); 1149 PF_MD5_UPD(rule, match_tag_not); 1150 PF_MD5_UPD(rule, natpass); 1151 PF_MD5_UPD(rule, keep_state); 1152 PF_MD5_UPD(rule, proto); 1153 PF_MD5_UPD(rule, type); 1154 PF_MD5_UPD(rule, code); 1155 PF_MD5_UPD(rule, flags); 1156 PF_MD5_UPD(rule, flagset); 1157 PF_MD5_UPD(rule, allow_opts); 1158 PF_MD5_UPD(rule, rt); 1159 PF_MD5_UPD(rule, tos); 1160} 1161 1162int 1163pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) 1164{ 1165 struct pf_ruleset *rs; 1166 struct pf_rule *rule, **old_array; 1167 struct pf_rulequeue *old_rules; 1168 int s, error; 
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/*
	 * Swap rules, keep the old.  The swap itself runs at raised
	 * priority so packet processing never sees a half-switched
	 * ruleset; the old list is only torn down after the swap.
	 */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	/* The retired list becomes the new (empty, closed) inactive list. */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * Hash every rule on the inactive lists (except the scrub ruleset) into
 * a single MD5 digest stored in pf_status.pf_chksum, and rebuild each
 * list's ptr_array so rules can be looked up by number.  Returns 0 or
 * ENOMEM if a ptr_array allocation fails.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well?
*/ 1230 if (rs_cnt == PF_RULESET_SCRUB) 1231 continue; 1232 1233 if (rs->rules[rs_cnt].inactive.ptr_array) 1234 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1235 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1236 1237 if (rs->rules[rs_cnt].inactive.rcount) { 1238 rs->rules[rs_cnt].inactive.ptr_array = 1239 malloc(sizeof(caddr_t) * 1240 rs->rules[rs_cnt].inactive.rcount, 1241 M_TEMP, M_NOWAIT); 1242 1243 if (!rs->rules[rs_cnt].inactive.ptr_array) 1244 return (ENOMEM); 1245 } 1246 1247 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1248 entries) { 1249 pf_hash_rule(&ctx, rule); 1250 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1251 } 1252 } 1253 1254 MD5Final(digest, &ctx); 1255 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 1256 return (0); 1257} 1258 1259int 1260#ifdef __FreeBSD__ 1261pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1262#else 1263pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 1264#endif 1265{ 1266 struct pf_pooladdr *pa = NULL; 1267 struct pf_pool *pool = NULL; 1268#ifndef __FreeBSD__ 1269 int s; 1270#endif 1271 int error = 0; 1272 1273 /* XXX keep in sync with switch() below */ 1274#ifdef __FreeBSD__ 1275 if (securelevel_gt(td->td_ucred, 2)) 1276#else 1277 if (securelevel > 1) 1278#endif 1279 switch (cmd) { 1280 case DIOCGETRULES: 1281 case DIOCGETRULE: 1282 case DIOCGETADDRS: 1283 case DIOCGETADDR: 1284 case DIOCGETSTATE: 1285 case DIOCSETSTATUSIF: 1286 case DIOCGETSTATUS: 1287 case DIOCCLRSTATUS: 1288 case DIOCNATLOOK: 1289 case DIOCSETDEBUG: 1290 case DIOCGETSTATES: 1291 case DIOCGETTIMEOUT: 1292 case DIOCCLRRULECTRS: 1293 case DIOCGETLIMIT: 1294 case DIOCGETALTQS: 1295 case DIOCGETALTQ: 1296 case DIOCGETQSTATS: 1297 case DIOCGETRULESETS: 1298 case DIOCGETRULESET: 1299 case DIOCRGETTABLES: 1300 case DIOCRGETTSTATS: 1301 case DIOCRCLRTSTATS: 1302 case DIOCRCLRADDRS: 1303 case DIOCRADDADDRS: 1304 case DIOCRDELADDRS: 1305 case DIOCRSETADDRS: 1306 
case DIOCRGETADDRS: 1307 case DIOCRGETASTATS: 1308 case DIOCRCLRASTATS: 1309 case DIOCRTSTADDRS: 1310 case DIOCOSFPGET: 1311 case DIOCGETSRCNODES: 1312 case DIOCCLRSRCNODES: 1313 case DIOCIGETIFACES: 1314#ifdef __FreeBSD__ 1315 case DIOCGIFSPEED: 1316#endif 1317 case DIOCSETIFFLAG: 1318 case DIOCCLRIFFLAG: 1319 break; 1320 case DIOCRCLRTABLES: 1321 case DIOCRADDTABLES: 1322 case DIOCRDELTABLES: 1323 case DIOCRSETTFLAGS: 1324 if (((struct pfioc_table *)addr)->pfrio_flags & 1325 PFR_FLAG_DUMMY) 1326 break; /* dummy operation ok */ 1327 return (EPERM); 1328 default: 1329 return (EPERM); 1330 } 1331 1332 if (!(flags & FWRITE)) 1333 switch (cmd) { 1334 case DIOCGETRULES: 1335 case DIOCGETADDRS: 1336 case DIOCGETADDR: 1337 case DIOCGETSTATE: 1338 case DIOCGETSTATUS: 1339 case DIOCGETSTATES: 1340 case DIOCGETTIMEOUT: 1341 case DIOCGETLIMIT: 1342 case DIOCGETALTQS: 1343 case DIOCGETALTQ: 1344 case DIOCGETQSTATS: 1345 case DIOCGETRULESETS: 1346 case DIOCGETRULESET: 1347 case DIOCNATLOOK: 1348 case DIOCRGETTABLES: 1349 case DIOCRGETTSTATS: 1350 case DIOCRGETADDRS: 1351 case DIOCRGETASTATS: 1352 case DIOCRTSTADDRS: 1353 case DIOCOSFPGET: 1354 case DIOCGETSRCNODES: 1355 case DIOCIGETIFACES: 1356#ifdef __FreeBSD__ 1357 case DIOCGIFSPEED: 1358#endif 1359 break; 1360 case DIOCRCLRTABLES: 1361 case DIOCRADDTABLES: 1362 case DIOCRDELTABLES: 1363 case DIOCRCLRTSTATS: 1364 case DIOCRCLRADDRS: 1365 case DIOCRADDADDRS: 1366 case DIOCRDELADDRS: 1367 case DIOCRSETADDRS: 1368 case DIOCRSETTFLAGS: 1369 if (((struct pfioc_table *)addr)->pfrio_flags & 1370 PFR_FLAG_DUMMY) { 1371 flags |= FWRITE; /* need write lock for dummy */ 1372 break; /* dummy operation ok */ 1373 } 1374 return (EACCES); 1375 case DIOCGETRULE: 1376 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR) 1377 return (EACCES); 1378 break; 1379 default: 1380 return (EACCES); 1381 } 1382 1383 if (flags & FWRITE) 1384#ifdef __FreeBSD__ 1385 sx_xlock(&pf_consistency_lock); 1386 else 1387 sx_slock(&pf_consistency_lock); 
1388#else 1389 rw_enter_write(&pf_consistency_lock); 1390 else 1391 rw_enter_read(&pf_consistency_lock); 1392#endif 1393 1394#ifdef __FreeBSD__ 1395 PF_LOCK(); 1396#else 1397 s = splsoftnet(); 1398#endif 1399 switch (cmd) { 1400 1401 case DIOCSTART: 1402 if (pf_status.running) 1403 error = EEXIST; 1404 else { 1405#ifdef __FreeBSD__ 1406 PF_UNLOCK(); 1407 error = hook_pf(); 1408 PF_LOCK(); 1409 if (error) { 1410 DPFPRINTF(PF_DEBUG_MISC, 1411 ("pf: pfil registeration fail\n")); 1412 break; 1413 } 1414#endif 1415 pf_status.running = 1; 1416 pf_status.since = time_second; 1417 if (pf_status.stateid == 0) { 1418 pf_status.stateid = time_second; 1419 pf_status.stateid = pf_status.stateid << 32; 1420 } 1421 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1422 } 1423 break; 1424 1425 case DIOCSTOP: 1426 if (!pf_status.running) 1427 error = ENOENT; 1428 else { 1429 pf_status.running = 0; 1430#ifdef __FreeBSD__ 1431 PF_UNLOCK(); 1432 error = dehook_pf(); 1433 PF_LOCK(); 1434 if (error) { 1435 pf_status.running = 1; 1436 DPFPRINTF(PF_DEBUG_MISC, 1437 ("pf: pfil unregisteration failed\n")); 1438 } 1439#endif 1440 pf_status.since = time_second; 1441 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1442 } 1443 break; 1444 1445 case DIOCADDRULE: { 1446 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1447 struct pf_ruleset *ruleset; 1448 struct pf_rule *rule, *tail; 1449 struct pf_pooladdr *pa; 1450 int rs_num; 1451 1452 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1453 ruleset = pf_find_ruleset(pr->anchor); 1454 if (ruleset == NULL) { 1455 error = EINVAL; 1456 break; 1457 } 1458 rs_num = pf_get_ruleset_number(pr->rule.action); 1459 if (rs_num >= PF_RULESET_MAX) { 1460 error = EINVAL; 1461 break; 1462 } 1463 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1464 error = EINVAL; 1465 break; 1466 } 1467 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1468#ifdef __FreeBSD__ 1469 DPFPRINTF(PF_DEBUG_MISC, 1470 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1471 
ruleset->rules[rs_num].inactive.ticket)); 1472#endif 1473 error = EBUSY; 1474 break; 1475 } 1476 if (pr->pool_ticket != ticket_pabuf) { 1477#ifdef __FreeBSD__ 1478 DPFPRINTF(PF_DEBUG_MISC, 1479 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1480 ticket_pabuf)); 1481#endif 1482 error = EBUSY; 1483 break; 1484 } 1485 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1486 if (rule == NULL) { 1487 error = ENOMEM; 1488 break; 1489 } 1490 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1491#ifdef __FreeBSD__ 1492 rule->cuid = td->td_ucred->cr_ruid; 1493 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1494#else 1495 rule->cuid = p->p_cred->p_ruid; 1496 rule->cpid = p->p_pid; 1497#endif 1498 rule->anchor = NULL; 1499 rule->kif = NULL; 1500 TAILQ_INIT(&rule->rpool.list); 1501 /* initialize refcounting */ 1502 rule->states = 0; 1503 rule->src_nodes = 0; 1504 rule->entries.tqe_prev = NULL; 1505#ifndef INET 1506 if (rule->af == AF_INET) { 1507 pool_put(&pf_rule_pl, rule); 1508 error = EAFNOSUPPORT; 1509 break; 1510 } 1511#endif /* INET */ 1512#ifndef INET6 1513 if (rule->af == AF_INET6) { 1514 pool_put(&pf_rule_pl, rule); 1515 error = EAFNOSUPPORT; 1516 break; 1517 } 1518#endif /* INET6 */ 1519 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1520 pf_rulequeue); 1521 if (tail) 1522 rule->nr = tail->nr + 1; 1523 else 1524 rule->nr = 0; 1525 if (rule->ifname[0]) { 1526 rule->kif = pfi_kif_get(rule->ifname); 1527 if (rule->kif == NULL) { 1528 pool_put(&pf_rule_pl, rule); 1529 error = EINVAL; 1530 break; 1531 } 1532 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1533 } 1534 1535#ifdef __FreeBSD__ /* ROUTING */ 1536 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs) 1537#else 1538 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid)) 1539#endif 1540 error = EBUSY; 1541 1542#ifdef ALTQ 1543 /* set queue IDs */ 1544 if (rule->qname[0] != 0) { 1545 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1546 error = EBUSY; 1547 else if (rule->pqname[0] != 0) { 1548 if ((rule->pqid = 1549 
pf_qname2qid(rule->pqname)) == 0) 1550 error = EBUSY; 1551 } else 1552 rule->pqid = rule->qid; 1553 } 1554#endif 1555 if (rule->tagname[0]) 1556 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1557 error = EBUSY; 1558 if (rule->match_tagname[0]) 1559 if ((rule->match_tag = 1560 pf_tagname2tag(rule->match_tagname)) == 0) 1561 error = EBUSY; 1562 if (rule->rt && !rule->direction) 1563 error = EINVAL; 1564#if NPFLOG > 0 1565#ifdef __FreeBSD__ 1566 if (!rule->log) 1567 rule->logif = 0; 1568#endif 1569 if (rule->logif >= PFLOGIFS_MAX) 1570 error = EINVAL; 1571#endif 1572 if (pf_rtlabel_add(&rule->src.addr) || 1573 pf_rtlabel_add(&rule->dst.addr)) 1574 error = EBUSY; 1575 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1576 error = EINVAL; 1577 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1578 error = EINVAL; 1579 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1580 error = EINVAL; 1581 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1582 error = EINVAL; 1583 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1584 error = EINVAL; 1585 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1586 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1587 error = EINVAL; 1588 1589 if (rule->overload_tblname[0]) { 1590 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1591 rule->overload_tblname)) == NULL) 1592 error = EINVAL; 1593 else 1594 rule->overload_tbl->pfrkt_flags |= 1595 PFR_TFLAG_ACTIVE; 1596 } 1597 1598 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1599 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1600 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1601 (rule->rt > PF_FASTROUTE)) && 1602 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1603 error = EINVAL; 1604 1605 if (error) { 1606 pf_rm_rule(NULL, rule); 1607 break; 1608 } 1609 1610#ifdef __FreeBSD__ 1611 if (!debug_pfugidhack && (rule->uid.op || rule->gid.op || 1612 rule->log & PF_LOG_SOCKET_LOOKUP)) { 1613 DPFPRINTF(PF_DEBUG_MISC, 1614 ("pf: debug.pfugidhack enabled\n")); 1615 debug_pfugidhack = 1; 1616 } 
1617#endif 1618 1619 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1620 rule->evaluations = rule->packets[0] = rule->packets[1] = 1621 rule->bytes[0] = rule->bytes[1] = 0; 1622 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1623 rule, entries); 1624 ruleset->rules[rs_num].inactive.rcount++; 1625 break; 1626 } 1627 1628 case DIOCGETRULES: { 1629 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1630 struct pf_ruleset *ruleset; 1631 struct pf_rule *tail; 1632 int rs_num; 1633 1634 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1635 ruleset = pf_find_ruleset(pr->anchor); 1636 if (ruleset == NULL) { 1637 error = EINVAL; 1638 break; 1639 } 1640 rs_num = pf_get_ruleset_number(pr->rule.action); 1641 if (rs_num >= PF_RULESET_MAX) { 1642 error = EINVAL; 1643 break; 1644 } 1645 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1646 pf_rulequeue); 1647 if (tail) 1648 pr->nr = tail->nr + 1; 1649 else 1650 pr->nr = 0; 1651 pr->ticket = ruleset->rules[rs_num].active.ticket; 1652 break; 1653 } 1654 1655 case DIOCGETRULE: { 1656 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1657 struct pf_ruleset *ruleset; 1658 struct pf_rule *rule; 1659 int rs_num, i; 1660 1661 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1662 ruleset = pf_find_ruleset(pr->anchor); 1663 if (ruleset == NULL) { 1664 error = EINVAL; 1665 break; 1666 } 1667 rs_num = pf_get_ruleset_number(pr->rule.action); 1668 if (rs_num >= PF_RULESET_MAX) { 1669 error = EINVAL; 1670 break; 1671 } 1672 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1673 error = EBUSY; 1674 break; 1675 } 1676 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1677 while ((rule != NULL) && (rule->nr != pr->nr)) 1678 rule = TAILQ_NEXT(rule, entries); 1679 if (rule == NULL) { 1680 error = EBUSY; 1681 break; 1682 } 1683 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1684 if (pf_anchor_copyout(ruleset, rule, pr)) { 1685 error = EBUSY; 1686 break; 1687 } 1688 pfi_dynaddr_copyout(&pr->rule.src.addr); 1689 
pfi_dynaddr_copyout(&pr->rule.dst.addr); 1690 pf_tbladdr_copyout(&pr->rule.src.addr); 1691 pf_tbladdr_copyout(&pr->rule.dst.addr); 1692 pf_rtlabel_copyout(&pr->rule.src.addr); 1693 pf_rtlabel_copyout(&pr->rule.dst.addr); 1694 for (i = 0; i < PF_SKIP_COUNT; ++i) 1695 if (rule->skip[i].ptr == NULL) 1696 pr->rule.skip[i].nr = -1; 1697 else 1698 pr->rule.skip[i].nr = 1699 rule->skip[i].ptr->nr; 1700 1701 if (pr->action == PF_GET_CLR_CNTR) { 1702 rule->evaluations = 0; 1703 rule->packets[0] = rule->packets[1] = 0; 1704 rule->bytes[0] = rule->bytes[1] = 0; 1705 } 1706 break; 1707 } 1708 1709 case DIOCCHANGERULE: { 1710 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1711 struct pf_ruleset *ruleset; 1712 struct pf_rule *oldrule = NULL, *newrule = NULL; 1713 u_int32_t nr = 0; 1714 int rs_num; 1715 1716 if (!(pcr->action == PF_CHANGE_REMOVE || 1717 pcr->action == PF_CHANGE_GET_TICKET) && 1718 pcr->pool_ticket != ticket_pabuf) { 1719 error = EBUSY; 1720 break; 1721 } 1722 1723 if (pcr->action < PF_CHANGE_ADD_HEAD || 1724 pcr->action > PF_CHANGE_GET_TICKET) { 1725 error = EINVAL; 1726 break; 1727 } 1728 ruleset = pf_find_ruleset(pcr->anchor); 1729 if (ruleset == NULL) { 1730 error = EINVAL; 1731 break; 1732 } 1733 rs_num = pf_get_ruleset_number(pcr->rule.action); 1734 if (rs_num >= PF_RULESET_MAX) { 1735 error = EINVAL; 1736 break; 1737 } 1738 1739 if (pcr->action == PF_CHANGE_GET_TICKET) { 1740 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1741 break; 1742 } else { 1743 if (pcr->ticket != 1744 ruleset->rules[rs_num].active.ticket) { 1745 error = EINVAL; 1746 break; 1747 } 1748 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1749 error = EINVAL; 1750 break; 1751 } 1752 } 1753 1754 if (pcr->action != PF_CHANGE_REMOVE) { 1755 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1756 if (newrule == NULL) { 1757 error = ENOMEM; 1758 break; 1759 } 1760 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1761#ifdef __FreeBSD__ 1762 newrule->cuid = td->td_ucred->cr_ruid; 1763 
newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1764#else 1765 newrule->cuid = p->p_cred->p_ruid; 1766 newrule->cpid = p->p_pid; 1767#endif 1768 TAILQ_INIT(&newrule->rpool.list); 1769 /* initialize refcounting */ 1770 newrule->states = 0; 1771 newrule->entries.tqe_prev = NULL; 1772#ifndef INET 1773 if (newrule->af == AF_INET) { 1774 pool_put(&pf_rule_pl, newrule); 1775 error = EAFNOSUPPORT; 1776 break; 1777 } 1778#endif /* INET */ 1779#ifndef INET6 1780 if (newrule->af == AF_INET6) { 1781 pool_put(&pf_rule_pl, newrule); 1782 error = EAFNOSUPPORT; 1783 break; 1784 } 1785#endif /* INET6 */ 1786 if (newrule->ifname[0]) { 1787 newrule->kif = pfi_kif_get(newrule->ifname); 1788 if (newrule->kif == NULL) { 1789 pool_put(&pf_rule_pl, newrule); 1790 error = EINVAL; 1791 break; 1792 } 1793 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 1794 } else 1795 newrule->kif = NULL; 1796 1797 if (newrule->rtableid > 0 && 1798#ifdef __FreeBSD__ /* ROUTING */ 1799 newrule->rtableid > rt_numfibs) 1800#else 1801 !rtable_exists(newrule->rtableid)) 1802#endif 1803 error = EBUSY; 1804 1805#ifdef ALTQ 1806 /* set queue IDs */ 1807 if (newrule->qname[0] != 0) { 1808 if ((newrule->qid = 1809 pf_qname2qid(newrule->qname)) == 0) 1810 error = EBUSY; 1811 else if (newrule->pqname[0] != 0) { 1812 if ((newrule->pqid = 1813 pf_qname2qid(newrule->pqname)) == 0) 1814 error = EBUSY; 1815 } else 1816 newrule->pqid = newrule->qid; 1817 } 1818#endif /* ALTQ */ 1819 if (newrule->tagname[0]) 1820 if ((newrule->tag = 1821 pf_tagname2tag(newrule->tagname)) == 0) 1822 error = EBUSY; 1823 if (newrule->match_tagname[0]) 1824 if ((newrule->match_tag = pf_tagname2tag( 1825 newrule->match_tagname)) == 0) 1826 error = EBUSY; 1827 if (newrule->rt && !newrule->direction) 1828 error = EINVAL; 1829#ifdef __FreeBSD__ 1830#if NPFLOG > 0 1831 if (!newrule->log) 1832 newrule->logif = 0; 1833 if (newrule->logif >= PFLOGIFS_MAX) 1834 error = EINVAL; 1835#endif 1836#endif 1837 if (pf_rtlabel_add(&newrule->src.addr) || 1838 
pf_rtlabel_add(&newrule->dst.addr)) 1839 error = EBUSY; 1840 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1841 error = EINVAL; 1842 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1843 error = EINVAL; 1844 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1845 error = EINVAL; 1846 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1847 error = EINVAL; 1848 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1849 error = EINVAL; 1850 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1851 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1852 error = EINVAL; 1853 1854 if (newrule->overload_tblname[0]) { 1855 if ((newrule->overload_tbl = pfr_attach_table( 1856 ruleset, newrule->overload_tblname)) == 1857 NULL) 1858 error = EINVAL; 1859 else 1860 newrule->overload_tbl->pfrkt_flags |= 1861 PFR_TFLAG_ACTIVE; 1862 } 1863 1864 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1865 if (((((newrule->action == PF_NAT) || 1866 (newrule->action == PF_RDR) || 1867 (newrule->action == PF_BINAT) || 1868 (newrule->rt > PF_FASTROUTE)) && 1869 !newrule->anchor)) && 1870 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1871 error = EINVAL; 1872 1873 if (error) { 1874 pf_rm_rule(NULL, newrule); 1875 break; 1876 } 1877 1878#ifdef __FreeBSD__ 1879 if (!debug_pfugidhack && (newrule->uid.op || 1880 newrule->gid.op || 1881 newrule->log & PF_LOG_SOCKET_LOOKUP)) { 1882 DPFPRINTF(PF_DEBUG_MISC, 1883 ("pf: debug.pfugidhack enabled\n")); 1884 debug_pfugidhack = 1; 1885 } 1886#endif 1887 1888 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1889 newrule->evaluations = 0; 1890 newrule->packets[0] = newrule->packets[1] = 0; 1891 newrule->bytes[0] = newrule->bytes[1] = 0; 1892 } 1893 pf_empty_pool(&pf_pabuf); 1894 1895 if (pcr->action == PF_CHANGE_ADD_HEAD) 1896 oldrule = TAILQ_FIRST( 1897 ruleset->rules[rs_num].active.ptr); 1898 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1899 oldrule = TAILQ_LAST( 1900 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1901 else { 1902 oldrule = TAILQ_FIRST( 
1903 ruleset->rules[rs_num].active.ptr); 1904 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1905 oldrule = TAILQ_NEXT(oldrule, entries); 1906 if (oldrule == NULL) { 1907 if (newrule != NULL) 1908 pf_rm_rule(NULL, newrule); 1909 error = EINVAL; 1910 break; 1911 } 1912 } 1913 1914 if (pcr->action == PF_CHANGE_REMOVE) { 1915 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1916 ruleset->rules[rs_num].active.rcount--; 1917 } else { 1918 if (oldrule == NULL) 1919 TAILQ_INSERT_TAIL( 1920 ruleset->rules[rs_num].active.ptr, 1921 newrule, entries); 1922 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1923 pcr->action == PF_CHANGE_ADD_BEFORE) 1924 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1925 else 1926 TAILQ_INSERT_AFTER( 1927 ruleset->rules[rs_num].active.ptr, 1928 oldrule, newrule, entries); 1929 ruleset->rules[rs_num].active.rcount++; 1930 } 1931 1932 nr = 0; 1933 TAILQ_FOREACH(oldrule, 1934 ruleset->rules[rs_num].active.ptr, entries) 1935 oldrule->nr = nr++; 1936 1937 ruleset->rules[rs_num].active.ticket++; 1938 1939 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1940 pf_remove_if_empty_ruleset(ruleset); 1941 1942 break; 1943 } 1944 1945 case DIOCCLRSTATES: { 1946 struct pf_state *state, *nexts; 1947 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1948 int killed = 0; 1949 1950 for (state = RB_MIN(pf_state_tree_id, &tree_id); state; 1951 state = nexts) { 1952 nexts = RB_NEXT(pf_state_tree_id, &tree_id, state); 1953 1954 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1955 state->u.s.kif->pfik_name)) { 1956#if NPFSYNC 1957 /* don't send out individual delete messages */ 1958 state->sync_flags = PFSTATE_NOSYNC; 1959#endif 1960 pf_unlink_state(state); 1961 killed++; 1962 } 1963 } 1964 psk->psk_af = killed; 1965#if NPFSYNC 1966 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1967#endif 1968 break; 1969 } 1970 1971 case DIOCKILLSTATES: { 1972 struct pf_state *state, *nexts; 1973 struct pf_state_host *src, *dst; 1974 
struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1975 int killed = 0; 1976 1977 for (state = RB_MIN(pf_state_tree_id, &tree_id); state; 1978 state = nexts) { 1979 nexts = RB_NEXT(pf_state_tree_id, &tree_id, state); 1980 1981 if (state->direction == PF_OUT) { 1982 src = &state->lan; 1983 dst = &state->ext; 1984 } else { 1985 src = &state->ext; 1986 dst = &state->lan; 1987 } 1988 if ((!psk->psk_af || state->af == psk->psk_af) 1989 && (!psk->psk_proto || psk->psk_proto == 1990 state->proto) && 1991 PF_MATCHA(psk->psk_src.neg, 1992 &psk->psk_src.addr.v.a.addr, 1993 &psk->psk_src.addr.v.a.mask, 1994 &src->addr, state->af) && 1995 PF_MATCHA(psk->psk_dst.neg, 1996 &psk->psk_dst.addr.v.a.addr, 1997 &psk->psk_dst.addr.v.a.mask, 1998 &dst->addr, state->af) && 1999 (psk->psk_src.port_op == 0 || 2000 pf_match_port(psk->psk_src.port_op, 2001 psk->psk_src.port[0], psk->psk_src.port[1], 2002 src->port)) && 2003 (psk->psk_dst.port_op == 0 || 2004 pf_match_port(psk->psk_dst.port_op, 2005 psk->psk_dst.port[0], psk->psk_dst.port[1], 2006 dst->port)) && 2007 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 2008 state->u.s.kif->pfik_name))) { 2009#if NPFSYNC > 0 2010 /* send immediate delete of state */ 2011 pfsync_delete_state(state); 2012 state->sync_flags |= PFSTATE_NOSYNC; 2013#endif 2014 pf_unlink_state(state); 2015 killed++; 2016 } 2017 } 2018 psk->psk_af = killed; 2019 break; 2020 } 2021 2022 case DIOCADDSTATE: { 2023 struct pfioc_state *ps = (struct pfioc_state *)addr; 2024 struct pf_state *state; 2025 struct pfi_kif *kif; 2026 2027 if (ps->state.timeout >= PFTM_MAX && 2028 ps->state.timeout != PFTM_UNTIL_PACKET) { 2029 error = EINVAL; 2030 break; 2031 } 2032 state = pool_get(&pf_state_pl, PR_NOWAIT); 2033 if (state == NULL) { 2034 error = ENOMEM; 2035 break; 2036 } 2037 kif = pfi_kif_get(ps->state.u.ifname); 2038 if (kif == NULL) { 2039 pool_put(&pf_state_pl, state); 2040 error = ENOENT; 2041 break; 2042 } 2043 bcopy(&ps->state, state, sizeof(struct pf_state)); 
2044 bzero(&state->u, sizeof(state->u)); 2045 state->rule.ptr = &pf_default_rule; 2046 state->nat_rule.ptr = NULL; 2047 state->anchor.ptr = NULL; 2048 state->rt_kif = NULL; 2049 state->creation = time_second; 2050 state->pfsync_time = 0; 2051 state->packets[0] = state->packets[1] = 0; 2052 state->bytes[0] = state->bytes[1] = 0; 2053 2054 if (pf_insert_state(kif, state)) { 2055 pfi_kif_unref(kif, PFI_KIF_REF_NONE); 2056 pool_put(&pf_state_pl, state); 2057 error = ENOMEM; 2058 } 2059 break; 2060 } 2061 2062 case DIOCGETSTATE: { 2063 struct pfioc_state *ps = (struct pfioc_state *)addr; 2064 struct pf_state *state; 2065 u_int32_t nr; 2066 int secs; 2067 2068 nr = 0; 2069 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2070 if (nr >= ps->nr) 2071 break; 2072 nr++; 2073 } 2074 if (state == NULL) { 2075 error = EBUSY; 2076 break; 2077 } 2078 secs = time_second; 2079 bcopy(state, &ps->state, sizeof(ps->state)); 2080 strlcpy(ps->state.u.ifname, state->u.s.kif->pfik_name, 2081 sizeof(ps->state.u.ifname)); 2082 ps->state.rule.nr = state->rule.ptr->nr; 2083 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 2084 -1 : state->nat_rule.ptr->nr; 2085 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
2086 -1 : state->anchor.ptr->nr; 2087 ps->state.creation = secs - ps->state.creation; 2088 ps->state.expire = pf_state_expires(state); 2089 if (ps->state.expire > secs) 2090 ps->state.expire -= secs; 2091 else 2092 ps->state.expire = 0; 2093 break; 2094 } 2095 2096 case DIOCGETSTATES: { 2097 struct pfioc_states *ps = (struct pfioc_states *)addr; 2098 struct pf_state *state; 2099 struct pf_state *p, *pstore; 2100 u_int32_t nr = 0; 2101 int space = ps->ps_len; 2102 2103 if (space == 0) { 2104 nr = pf_status.states; 2105 ps->ps_len = sizeof(struct pf_state) * nr; 2106 break; 2107 } 2108 2109#ifdef __FreeBSD__ 2110 PF_UNLOCK(); 2111#endif 2112 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2113#ifdef __FreeBSD__ 2114 PF_LOCK(); 2115#endif 2116 2117 p = ps->ps_states; 2118 2119 state = TAILQ_FIRST(&state_list); 2120 while (state) { 2121 if (state->timeout != PFTM_UNLINKED) { 2122 int secs = time_second; 2123 2124 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2125 break; 2126 2127 bcopy(state, pstore, sizeof(*pstore)); 2128 strlcpy(pstore->u.ifname, 2129 state->u.s.kif->pfik_name, 2130 sizeof(pstore->u.ifname)); 2131 pstore->rule.nr = state->rule.ptr->nr; 2132 pstore->nat_rule.nr = (state->nat_rule.ptr == 2133 NULL) ? -1 : state->nat_rule.ptr->nr; 2134 pstore->anchor.nr = (state->anchor.ptr == 2135 NULL) ? 
-1 : state->anchor.ptr->nr; 2136 pstore->creation = secs - pstore->creation; 2137 pstore->expire = pf_state_expires(state); 2138 if (pstore->expire > secs) 2139 pstore->expire -= secs; 2140 else 2141 pstore->expire = 0; 2142#ifdef __FreeBSD__ 2143 PF_COPYOUT(pstore, p, sizeof(*p), error); 2144#else 2145 error = copyout(pstore, p, sizeof(*p)); 2146#endif 2147 if (error) { 2148 free(pstore, M_TEMP); 2149 goto fail; 2150 } 2151 p++; 2152 nr++; 2153 } 2154 state = TAILQ_NEXT(state, u.s.entry_list); 2155 } 2156 2157 ps->ps_len = sizeof(struct pf_state) * nr; 2158 2159 free(pstore, M_TEMP); 2160 break; 2161 } 2162 2163 case DIOCGETSTATUS: { 2164 struct pf_status *s = (struct pf_status *)addr; 2165 bcopy(&pf_status, s, sizeof(struct pf_status)); 2166 pfi_fill_oldstatus(s); 2167 break; 2168 } 2169 2170 case DIOCSETSTATUSIF: { 2171 struct pfioc_if *pi = (struct pfioc_if *)addr; 2172 2173 if (pi->ifname[0] == 0) { 2174 bzero(pf_status.ifname, IFNAMSIZ); 2175 break; 2176 } 2177 if (ifunit(pi->ifname) == NULL) { 2178 error = EINVAL; 2179 break; 2180 } 2181 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2182 break; 2183 } 2184 2185 case DIOCCLRSTATUS: { 2186 bzero(pf_status.counters, sizeof(pf_status.counters)); 2187 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2188 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2189 pf_status.since = time_second; 2190 if (*pf_status.ifname) 2191 pfi_clr_istats(pf_status.ifname); 2192 break; 2193 } 2194 2195 case DIOCNATLOOK: { 2196 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2197 struct pf_state *state; 2198 struct pf_state_cmp key; 2199 int m = 0, direction = pnl->direction; 2200 2201 key.af = pnl->af; 2202 key.proto = pnl->proto; 2203 2204 if (!pnl->proto || 2205 PF_AZERO(&pnl->saddr, pnl->af) || 2206 PF_AZERO(&pnl->daddr, pnl->af) || 2207 ((pnl->proto == IPPROTO_TCP || 2208 pnl->proto == IPPROTO_UDP) && 2209 (!pnl->dport || !pnl->sport))) 2210 error = EINVAL; 2211 else { 2212 /* 2213 * userland gives us 
source and dest of connection, 2214 * reverse the lookup so we ask for what happens with 2215 * the return traffic, enabling us to find it in the 2216 * state tree. 2217 */ 2218 if (direction == PF_IN) { 2219 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2220 key.ext.port = pnl->dport; 2221 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2222 key.gwy.port = pnl->sport; 2223 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2224 } else { 2225 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2226 key.lan.port = pnl->dport; 2227 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2228 key.ext.port = pnl->sport; 2229 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2230 } 2231 if (m > 1) 2232 error = E2BIG; /* more than one state */ 2233 else if (state != NULL) { 2234 if (direction == PF_IN) { 2235 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 2236 state->af); 2237 pnl->rsport = state->lan.port; 2238 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2239 pnl->af); 2240 pnl->rdport = pnl->dport; 2241 } else { 2242 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 2243 state->af); 2244 pnl->rdport = state->gwy.port; 2245 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2246 pnl->af); 2247 pnl->rsport = pnl->sport; 2248 } 2249 } else 2250 error = ENOENT; 2251 } 2252 break; 2253 } 2254 2255 case DIOCSETTIMEOUT: { 2256 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2257 int old; 2258 2259 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2260 pt->seconds < 0) { 2261 error = EINVAL; 2262 goto fail; 2263 } 2264 old = pf_default_rule.timeout[pt->timeout]; 2265 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2266 pt->seconds = 1; 2267 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2268 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2269 wakeup(pf_purge_thread); 2270 pt->seconds = old; 2271 break; 2272 } 2273 2274 case DIOCGETTIMEOUT: { 2275 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2276 2277 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2278 error = EINVAL; 2279 goto fail; 2280 } 2281 pt->seconds = 
pf_default_rule.timeout[pt->timeout]; 2282 break; 2283 } 2284 2285 case DIOCGETLIMIT: { 2286 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2287 2288 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2289 error = EINVAL; 2290 goto fail; 2291 } 2292 pl->limit = pf_pool_limits[pl->index].limit; 2293 break; 2294 } 2295 2296 case DIOCSETLIMIT: { 2297 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2298 int old_limit; 2299 2300 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2301 pf_pool_limits[pl->index].pp == NULL) { 2302 error = EINVAL; 2303 goto fail; 2304 } 2305#ifdef __FreeBSD__ 2306 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 2307#else 2308 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2309 pl->limit, NULL, 0) != 0) { 2310 error = EBUSY; 2311 goto fail; 2312 } 2313#endif 2314 old_limit = pf_pool_limits[pl->index].limit; 2315 pf_pool_limits[pl->index].limit = pl->limit; 2316 pl->limit = old_limit; 2317 break; 2318 } 2319 2320 case DIOCSETDEBUG: { 2321 u_int32_t *level = (u_int32_t *)addr; 2322 2323 pf_status.debug = *level; 2324 break; 2325 } 2326 2327 case DIOCCLRRULECTRS: { 2328 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2329 struct pf_ruleset *ruleset = &pf_main_ruleset; 2330 struct pf_rule *rule; 2331 2332 TAILQ_FOREACH(rule, 2333 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2334 rule->evaluations = 0; 2335 rule->packets[0] = rule->packets[1] = 0; 2336 rule->bytes[0] = rule->bytes[1] = 0; 2337 } 2338 break; 2339 } 2340 2341#ifdef __FreeBSD__ 2342 case DIOCGIFSPEED: { 2343 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2344 struct pf_ifspeed ps; 2345 struct ifnet *ifp; 2346 2347 if (psp->ifname[0] != 0) { 2348 /* Can we completely trust user-land? 
*/ 2349 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2350 ifp = ifunit(ps.ifname); 2351 if (ifp != NULL) 2352 psp->baudrate = ifp->if_baudrate; 2353 else 2354 error = EINVAL; 2355 } else 2356 error = EINVAL; 2357 break; 2358 } 2359#endif /* __FreeBSD__ */ 2360 2361#ifdef ALTQ 2362 case DIOCSTARTALTQ: { 2363 struct pf_altq *altq; 2364 2365 /* enable all altq interfaces on active list */ 2366 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2367#ifdef __FreeBSD__ 2368 if (altq->qname[0] == 0 && (altq->local_flags & 2369 PFALTQ_FLAG_IF_REMOVED) == 0) { 2370#else 2371 if (altq->qname[0] == 0) { 2372#endif 2373 error = pf_enable_altq(altq); 2374 if (error != 0) 2375 break; 2376 } 2377 } 2378 if (error == 0) 2379 pf_altq_running = 1; 2380 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2381 break; 2382 } 2383 2384 case DIOCSTOPALTQ: { 2385 struct pf_altq *altq; 2386 2387 /* disable all altq interfaces on active list */ 2388 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2389#ifdef __FreeBSD__ 2390 if (altq->qname[0] == 0 && (altq->local_flags & 2391 PFALTQ_FLAG_IF_REMOVED) == 0) { 2392#else 2393 if (altq->qname[0] == 0) { 2394#endif 2395 error = pf_disable_altq(altq); 2396 if (error != 0) 2397 break; 2398 } 2399 } 2400 if (error == 0) 2401 pf_altq_running = 0; 2402 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2403 break; 2404 } 2405 2406 case DIOCADDALTQ: { 2407 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2408 struct pf_altq *altq, *a; 2409 2410 if (pa->ticket != ticket_altqs_inactive) { 2411 error = EBUSY; 2412 break; 2413 } 2414 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2415 if (altq == NULL) { 2416 error = ENOMEM; 2417 break; 2418 } 2419 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2420#ifdef __FreeBSD__ 2421 altq->local_flags = 0; 2422#endif 2423 2424 /* 2425 * if this is for a queue, find the discipline and 2426 * copy the necessary fields 2427 */ 2428 if (altq->qname[0] != 0) { 2429 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2430 error = EBUSY; 
2431 pool_put(&pf_altq_pl, altq); 2432 break; 2433 } 2434 altq->altq_disc = NULL; 2435 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2436 if (strncmp(a->ifname, altq->ifname, 2437 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2438 altq->altq_disc = a->altq_disc; 2439 break; 2440 } 2441 } 2442 } 2443 2444#ifdef __FreeBSD__ 2445 struct ifnet *ifp; 2446 2447 if ((ifp = ifunit(altq->ifname)) == NULL) { 2448 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 2449 } else { 2450 PF_UNLOCK(); 2451#endif 2452 error = altq_add(altq); 2453#ifdef __FreeBSD__ 2454 PF_LOCK(); 2455 } 2456#endif 2457 if (error) { 2458 pool_put(&pf_altq_pl, altq); 2459 break; 2460 } 2461 2462 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2463 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2464 break; 2465 } 2466 2467 case DIOCGETALTQS: { 2468 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2469 struct pf_altq *altq; 2470 2471 pa->nr = 0; 2472 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2473 pa->nr++; 2474 pa->ticket = ticket_altqs_active; 2475 break; 2476 } 2477 2478 case DIOCGETALTQ: { 2479 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2480 struct pf_altq *altq; 2481 u_int32_t nr; 2482 2483 if (pa->ticket != ticket_altqs_active) { 2484 error = EBUSY; 2485 break; 2486 } 2487 nr = 0; 2488 altq = TAILQ_FIRST(pf_altqs_active); 2489 while ((altq != NULL) && (nr < pa->nr)) { 2490 altq = TAILQ_NEXT(altq, entries); 2491 nr++; 2492 } 2493 if (altq == NULL) { 2494 error = EBUSY; 2495 break; 2496 } 2497 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2498 break; 2499 } 2500 2501 case DIOCCHANGEALTQ: 2502 /* CHANGEALTQ not supported yet! 
*/ 2503 error = ENODEV; 2504 break; 2505 2506 case DIOCGETQSTATS: { 2507 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2508 struct pf_altq *altq; 2509 u_int32_t nr; 2510 int nbytes; 2511 2512 if (pq->ticket != ticket_altqs_active) { 2513 error = EBUSY; 2514 break; 2515 } 2516 nbytes = pq->nbytes; 2517 nr = 0; 2518 altq = TAILQ_FIRST(pf_altqs_active); 2519 while ((altq != NULL) && (nr < pq->nr)) { 2520 altq = TAILQ_NEXT(altq, entries); 2521 nr++; 2522 } 2523 if (altq == NULL) { 2524 error = EBUSY; 2525 break; 2526 } 2527#ifdef __FreeBSD__ 2528 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2529 error = ENXIO; 2530 break; 2531 } 2532 PF_UNLOCK(); 2533#endif 2534 error = altq_getqstats(altq, pq->buf, &nbytes); 2535#ifdef __FreeBSD__ 2536 PF_LOCK(); 2537#endif 2538 if (error == 0) { 2539 pq->scheduler = altq->scheduler; 2540 pq->nbytes = nbytes; 2541 } 2542 break; 2543 } 2544#endif /* ALTQ */ 2545 2546 case DIOCBEGINADDRS: { 2547 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2548 2549 pf_empty_pool(&pf_pabuf); 2550 pp->ticket = ++ticket_pabuf; 2551 break; 2552 } 2553 2554 case DIOCADDADDR: { 2555 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2556 2557 if (pp->ticket != ticket_pabuf) { 2558 error = EBUSY; 2559 break; 2560 } 2561#ifndef INET 2562 if (pp->af == AF_INET) { 2563 error = EAFNOSUPPORT; 2564 break; 2565 } 2566#endif /* INET */ 2567#ifndef INET6 2568 if (pp->af == AF_INET6) { 2569 error = EAFNOSUPPORT; 2570 break; 2571 } 2572#endif /* INET6 */ 2573 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2574 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2575 pp->addr.addr.type != PF_ADDR_TABLE) { 2576 error = EINVAL; 2577 break; 2578 } 2579 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2580 if (pa == NULL) { 2581 error = ENOMEM; 2582 break; 2583 } 2584 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2585 if (pa->ifname[0]) { 2586 pa->kif = pfi_kif_get(pa->ifname); 2587 if (pa->kif == NULL) { 2588 pool_put(&pf_pooladdr_pl, pa); 2589 
error = EINVAL; 2590 break; 2591 } 2592 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2593 } 2594 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2595 pfi_dynaddr_remove(&pa->addr); 2596 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2597 pool_put(&pf_pooladdr_pl, pa); 2598 error = EINVAL; 2599 break; 2600 } 2601 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2602 break; 2603 } 2604 2605 case DIOCGETADDRS: { 2606 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2607 2608 pp->nr = 0; 2609 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2610 pp->r_num, 0, 1, 0); 2611 if (pool == NULL) { 2612 error = EBUSY; 2613 break; 2614 } 2615 TAILQ_FOREACH(pa, &pool->list, entries) 2616 pp->nr++; 2617 break; 2618 } 2619 2620 case DIOCGETADDR: { 2621 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2622 u_int32_t nr = 0; 2623 2624 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2625 pp->r_num, 0, 1, 1); 2626 if (pool == NULL) { 2627 error = EBUSY; 2628 break; 2629 } 2630 pa = TAILQ_FIRST(&pool->list); 2631 while ((pa != NULL) && (nr < pp->nr)) { 2632 pa = TAILQ_NEXT(pa, entries); 2633 nr++; 2634 } 2635 if (pa == NULL) { 2636 error = EBUSY; 2637 break; 2638 } 2639 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2640 pfi_dynaddr_copyout(&pp->addr.addr); 2641 pf_tbladdr_copyout(&pp->addr.addr); 2642 pf_rtlabel_copyout(&pp->addr.addr); 2643 break; 2644 } 2645 2646 case DIOCCHANGEADDR: { 2647 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2648 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2649 struct pf_ruleset *ruleset; 2650 2651 if (pca->action < PF_CHANGE_ADD_HEAD || 2652 pca->action > PF_CHANGE_REMOVE) { 2653 error = EINVAL; 2654 break; 2655 } 2656 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2657 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2658 pca->addr.addr.type != PF_ADDR_TABLE) { 2659 error = EINVAL; 2660 break; 2661 } 2662 2663 ruleset = pf_find_ruleset(pca->anchor); 2664 if (ruleset == NULL) { 2665 error = EBUSY; 2666 break; 2667 } 2668 
pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2669 pca->r_num, pca->r_last, 1, 1); 2670 if (pool == NULL) { 2671 error = EBUSY; 2672 break; 2673 } 2674 if (pca->action != PF_CHANGE_REMOVE) { 2675 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2676 if (newpa == NULL) { 2677 error = ENOMEM; 2678 break; 2679 } 2680 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2681#ifndef INET 2682 if (pca->af == AF_INET) { 2683 pool_put(&pf_pooladdr_pl, newpa); 2684 error = EAFNOSUPPORT; 2685 break; 2686 } 2687#endif /* INET */ 2688#ifndef INET6 2689 if (pca->af == AF_INET6) { 2690 pool_put(&pf_pooladdr_pl, newpa); 2691 error = EAFNOSUPPORT; 2692 break; 2693 } 2694#endif /* INET6 */ 2695 if (newpa->ifname[0]) { 2696 newpa->kif = pfi_kif_get(newpa->ifname); 2697 if (newpa->kif == NULL) { 2698 pool_put(&pf_pooladdr_pl, newpa); 2699 error = EINVAL; 2700 break; 2701 } 2702 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2703 } else 2704 newpa->kif = NULL; 2705 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2706 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2707 pfi_dynaddr_remove(&newpa->addr); 2708 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2709 pool_put(&pf_pooladdr_pl, newpa); 2710 error = EINVAL; 2711 break; 2712 } 2713 } 2714 2715 if (pca->action == PF_CHANGE_ADD_HEAD) 2716 oldpa = TAILQ_FIRST(&pool->list); 2717 else if (pca->action == PF_CHANGE_ADD_TAIL) 2718 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2719 else { 2720 int i = 0; 2721 2722 oldpa = TAILQ_FIRST(&pool->list); 2723 while ((oldpa != NULL) && (i < pca->nr)) { 2724 oldpa = TAILQ_NEXT(oldpa, entries); 2725 i++; 2726 } 2727 if (oldpa == NULL) { 2728 error = EINVAL; 2729 break; 2730 } 2731 } 2732 2733 if (pca->action == PF_CHANGE_REMOVE) { 2734 TAILQ_REMOVE(&pool->list, oldpa, entries); 2735 pfi_dynaddr_remove(&oldpa->addr); 2736 pf_tbladdr_remove(&oldpa->addr); 2737 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2738 pool_put(&pf_pooladdr_pl, oldpa); 2739 } else { 2740 if (oldpa == NULL) 2741 
TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2742 else if (pca->action == PF_CHANGE_ADD_HEAD || 2743 pca->action == PF_CHANGE_ADD_BEFORE) 2744 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2745 else 2746 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2747 newpa, entries); 2748 } 2749 2750 pool->cur = TAILQ_FIRST(&pool->list); 2751 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2752 pca->af); 2753 break; 2754 } 2755 2756 case DIOCGETRULESETS: { 2757 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2758 struct pf_ruleset *ruleset; 2759 struct pf_anchor *anchor; 2760 2761 pr->path[sizeof(pr->path) - 1] = 0; 2762 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2763 error = EINVAL; 2764 break; 2765 } 2766 pr->nr = 0; 2767 if (ruleset->anchor == NULL) { 2768 /* XXX kludge for pf_main_ruleset */ 2769 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2770 if (anchor->parent == NULL) 2771 pr->nr++; 2772 } else { 2773 RB_FOREACH(anchor, pf_anchor_node, 2774 &ruleset->anchor->children) 2775 pr->nr++; 2776 } 2777 break; 2778 } 2779 2780 case DIOCGETRULESET: { 2781 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2782 struct pf_ruleset *ruleset; 2783 struct pf_anchor *anchor; 2784 u_int32_t nr = 0; 2785 2786 pr->path[sizeof(pr->path) - 1] = 0; 2787 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2788 error = EINVAL; 2789 break; 2790 } 2791 pr->name[0] = 0; 2792 if (ruleset->anchor == NULL) { 2793 /* XXX kludge for pf_main_ruleset */ 2794 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2795 if (anchor->parent == NULL && nr++ == pr->nr) { 2796 strlcpy(pr->name, anchor->name, 2797 sizeof(pr->name)); 2798 break; 2799 } 2800 } else { 2801 RB_FOREACH(anchor, pf_anchor_node, 2802 &ruleset->anchor->children) 2803 if (nr++ == pr->nr) { 2804 strlcpy(pr->name, anchor->name, 2805 sizeof(pr->name)); 2806 break; 2807 } 2808 } 2809 if (!pr->name[0]) 2810 error = EBUSY; 2811 break; 2812 } 2813 2814 case DIOCRCLRTABLES: { 2815 struct pfioc_table *io = (struct 
pfioc_table *)addr; 2816 2817 if (io->pfrio_esize != 0) { 2818 error = ENODEV; 2819 break; 2820 } 2821 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2822 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2823 break; 2824 } 2825 2826 case DIOCRADDTABLES: { 2827 struct pfioc_table *io = (struct pfioc_table *)addr; 2828 2829 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2830 error = ENODEV; 2831 break; 2832 } 2833 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2834 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2835 break; 2836 } 2837 2838 case DIOCRDELTABLES: { 2839 struct pfioc_table *io = (struct pfioc_table *)addr; 2840 2841 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2842 error = ENODEV; 2843 break; 2844 } 2845 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2846 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2847 break; 2848 } 2849 2850 case DIOCRGETTABLES: { 2851 struct pfioc_table *io = (struct pfioc_table *)addr; 2852 2853 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2854 error = ENODEV; 2855 break; 2856 } 2857 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2858 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2859 break; 2860 } 2861 2862 case DIOCRGETTSTATS: { 2863 struct pfioc_table *io = (struct pfioc_table *)addr; 2864 2865 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2866 error = ENODEV; 2867 break; 2868 } 2869 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2870 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2871 break; 2872 } 2873 2874 case DIOCRCLRTSTATS: { 2875 struct pfioc_table *io = (struct pfioc_table *)addr; 2876 2877 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2878 error = ENODEV; 2879 break; 2880 } 2881 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2882 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2883 break; 2884 } 2885 2886 case DIOCRSETTFLAGS: { 2887 struct pfioc_table *io = (struct pfioc_table *)addr; 2888 
2889 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2890 error = ENODEV; 2891 break; 2892 } 2893 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2894 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2895 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2896 break; 2897 } 2898 2899 case DIOCRCLRADDRS: { 2900 struct pfioc_table *io = (struct pfioc_table *)addr; 2901 2902 if (io->pfrio_esize != 0) { 2903 error = ENODEV; 2904 break; 2905 } 2906 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2907 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2908 break; 2909 } 2910 2911 case DIOCRADDADDRS: { 2912 struct pfioc_table *io = (struct pfioc_table *)addr; 2913 2914 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2915 error = ENODEV; 2916 break; 2917 } 2918 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2919 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2920 PFR_FLAG_USERIOCTL); 2921 break; 2922 } 2923 2924 case DIOCRDELADDRS: { 2925 struct pfioc_table *io = (struct pfioc_table *)addr; 2926 2927 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2928 error = ENODEV; 2929 break; 2930 } 2931 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2932 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2933 PFR_FLAG_USERIOCTL); 2934 break; 2935 } 2936 2937 case DIOCRSETADDRS: { 2938 struct pfioc_table *io = (struct pfioc_table *)addr; 2939 2940 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2941 error = ENODEV; 2942 break; 2943 } 2944 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2945 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2946 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2947 PFR_FLAG_USERIOCTL, 0); 2948 break; 2949 } 2950 2951 case DIOCRGETADDRS: { 2952 struct pfioc_table *io = (struct pfioc_table *)addr; 2953 2954 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2955 error = ENODEV; 2956 break; 2957 } 2958 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2959 &io->pfrio_size, 
io->pfrio_flags | PFR_FLAG_USERIOCTL); 2960 break; 2961 } 2962 2963 case DIOCRGETASTATS: { 2964 struct pfioc_table *io = (struct pfioc_table *)addr; 2965 2966 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2967 error = ENODEV; 2968 break; 2969 } 2970 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2971 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2972 break; 2973 } 2974 2975 case DIOCRCLRASTATS: { 2976 struct pfioc_table *io = (struct pfioc_table *)addr; 2977 2978 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2979 error = ENODEV; 2980 break; 2981 } 2982 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2983 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2984 PFR_FLAG_USERIOCTL); 2985 break; 2986 } 2987 2988 case DIOCRTSTADDRS: { 2989 struct pfioc_table *io = (struct pfioc_table *)addr; 2990 2991 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2992 error = ENODEV; 2993 break; 2994 } 2995 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2996 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2997 PFR_FLAG_USERIOCTL); 2998 break; 2999 } 3000 3001 case DIOCRINADEFINE: { 3002 struct pfioc_table *io = (struct pfioc_table *)addr; 3003 3004 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3005 error = ENODEV; 3006 break; 3007 } 3008 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 3009 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 3010 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3011 break; 3012 } 3013 3014 case DIOCOSFPADD: { 3015 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3016 error = pf_osfp_add(io); 3017 break; 3018 } 3019 3020 case DIOCOSFPGET: { 3021 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3022 error = pf_osfp_get(io); 3023 break; 3024 } 3025 3026 case DIOCXBEGIN: { 3027 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3028 struct pfioc_trans_e *ioe; 3029 struct pfr_table *table; 3030 int i; 3031 3032 if (io->esize != sizeof(*ioe)) { 3033 error = 
ENODEV; 3034 goto fail; 3035 } 3036#ifdef __FreeBSD__ 3037 PF_UNLOCK(); 3038#endif 3039 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3040 M_TEMP, M_WAITOK); 3041 table = (struct pfr_table *)malloc(sizeof(*table), 3042 M_TEMP, M_WAITOK); 3043#ifdef __FreeBSD__ 3044 PF_LOCK(); 3045#endif 3046 for (i = 0; i < io->size; i++) { 3047#ifdef __FreeBSD__ 3048 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3049 if (error) { 3050#else 3051 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3052#endif 3053 free(table, M_TEMP); 3054 free(ioe, M_TEMP); 3055 error = EFAULT; 3056 goto fail; 3057 } 3058 switch (ioe->rs_num) { 3059#ifdef ALTQ 3060 case PF_RULESET_ALTQ: 3061 if (ioe->anchor[0]) { 3062 free(table, M_TEMP); 3063 free(ioe, M_TEMP); 3064 error = EINVAL; 3065 goto fail; 3066 } 3067 if ((error = pf_begin_altq(&ioe->ticket))) { 3068 free(table, M_TEMP); 3069 free(ioe, M_TEMP); 3070 goto fail; 3071 } 3072 break; 3073#endif /* ALTQ */ 3074 case PF_RULESET_TABLE: 3075 bzero(table, sizeof(*table)); 3076 strlcpy(table->pfrt_anchor, ioe->anchor, 3077 sizeof(table->pfrt_anchor)); 3078 if ((error = pfr_ina_begin(table, 3079 &ioe->ticket, NULL, 0))) { 3080 free(table, M_TEMP); 3081 free(ioe, M_TEMP); 3082 goto fail; 3083 } 3084 break; 3085 default: 3086 if ((error = pf_begin_rules(&ioe->ticket, 3087 ioe->rs_num, ioe->anchor))) { 3088 free(table, M_TEMP); 3089 free(ioe, M_TEMP); 3090 goto fail; 3091 } 3092 break; 3093 } 3094#ifdef __FreeBSD__ 3095 PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]), 3096 error); 3097 if (error) { 3098#else 3099 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 3100#endif 3101 free(table, M_TEMP); 3102 free(ioe, M_TEMP); 3103 error = EFAULT; 3104 goto fail; 3105 } 3106 } 3107 free(table, M_TEMP); 3108 free(ioe, M_TEMP); 3109 break; 3110 } 3111 3112 case DIOCXROLLBACK: { 3113 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3114 struct pfioc_trans_e *ioe; 3115 struct pfr_table *table; 3116 int i; 3117 3118 if (io->esize != sizeof(*ioe)) { 
3119 error = ENODEV; 3120 goto fail; 3121 } 3122#ifdef __FreeBSD__ 3123 PF_UNLOCK(); 3124#endif 3125 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3126 M_TEMP, M_WAITOK); 3127 table = (struct pfr_table *)malloc(sizeof(*table), 3128 M_TEMP, M_WAITOK); 3129#ifdef __FreeBSD__ 3130 PF_LOCK(); 3131#endif 3132 for (i = 0; i < io->size; i++) { 3133#ifdef __FreeBSD__ 3134 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3135 if (error) { 3136#else 3137 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3138#endif 3139 free(table, M_TEMP); 3140 free(ioe, M_TEMP); 3141 error = EFAULT; 3142 goto fail; 3143 } 3144 switch (ioe->rs_num) { 3145#ifdef ALTQ 3146 case PF_RULESET_ALTQ: 3147 if (ioe->anchor[0]) { 3148 free(table, M_TEMP); 3149 free(ioe, M_TEMP); 3150 error = EINVAL; 3151 goto fail; 3152 } 3153 if ((error = pf_rollback_altq(ioe->ticket))) { 3154 free(table, M_TEMP); 3155 free(ioe, M_TEMP); 3156 goto fail; /* really bad */ 3157 } 3158 break; 3159#endif /* ALTQ */ 3160 case PF_RULESET_TABLE: 3161 bzero(table, sizeof(*table)); 3162 strlcpy(table->pfrt_anchor, ioe->anchor, 3163 sizeof(table->pfrt_anchor)); 3164 if ((error = pfr_ina_rollback(table, 3165 ioe->ticket, NULL, 0))) { 3166 free(table, M_TEMP); 3167 free(ioe, M_TEMP); 3168 goto fail; /* really bad */ 3169 } 3170 break; 3171 default: 3172 if ((error = pf_rollback_rules(ioe->ticket, 3173 ioe->rs_num, ioe->anchor))) { 3174 free(table, M_TEMP); 3175 free(ioe, M_TEMP); 3176 goto fail; /* really bad */ 3177 } 3178 break; 3179 } 3180 } 3181 free(table, M_TEMP); 3182 free(ioe, M_TEMP); 3183 break; 3184 } 3185 3186 case DIOCXCOMMIT: { 3187 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3188 struct pfioc_trans_e *ioe; 3189 struct pfr_table *table; 3190 struct pf_ruleset *rs; 3191 int i; 3192 3193 if (io->esize != sizeof(*ioe)) { 3194 error = ENODEV; 3195 goto fail; 3196 } 3197#ifdef __FreeBSD__ 3198 PF_UNLOCK(); 3199#endif 3200 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3201 M_TEMP, M_WAITOK); 3202 table = 
(struct pfr_table *)malloc(sizeof(*table), 3203 M_TEMP, M_WAITOK); 3204#ifdef __FreeBSD__ 3205 PF_LOCK(); 3206#endif 3207 /* first makes sure everything will succeed */ 3208 for (i = 0; i < io->size; i++) { 3209#ifdef __FreeBSD__ 3210 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3211 if (error) { 3212#else 3213 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3214#endif 3215 free(table, M_TEMP); 3216 free(ioe, M_TEMP); 3217 error = EFAULT; 3218 goto fail; 3219 } 3220 switch (ioe->rs_num) { 3221#ifdef ALTQ 3222 case PF_RULESET_ALTQ: 3223 if (ioe->anchor[0]) { 3224 free(table, M_TEMP); 3225 free(ioe, M_TEMP); 3226 error = EINVAL; 3227 goto fail; 3228 } 3229 if (!altqs_inactive_open || ioe->ticket != 3230 ticket_altqs_inactive) { 3231 free(table, M_TEMP); 3232 free(ioe, M_TEMP); 3233 error = EBUSY; 3234 goto fail; 3235 } 3236 break; 3237#endif /* ALTQ */ 3238 case PF_RULESET_TABLE: 3239 rs = pf_find_ruleset(ioe->anchor); 3240 if (rs == NULL || !rs->topen || ioe->ticket != 3241 rs->tticket) { 3242 free(table, M_TEMP); 3243 free(ioe, M_TEMP); 3244 error = EBUSY; 3245 goto fail; 3246 } 3247 break; 3248 default: 3249 if (ioe->rs_num < 0 || ioe->rs_num >= 3250 PF_RULESET_MAX) { 3251 free(table, M_TEMP); 3252 free(ioe, M_TEMP); 3253 error = EINVAL; 3254 goto fail; 3255 } 3256 rs = pf_find_ruleset(ioe->anchor); 3257 if (rs == NULL || 3258 !rs->rules[ioe->rs_num].inactive.open || 3259 rs->rules[ioe->rs_num].inactive.ticket != 3260 ioe->ticket) { 3261 free(table, M_TEMP); 3262 free(ioe, M_TEMP); 3263 error = EBUSY; 3264 goto fail; 3265 } 3266 break; 3267 } 3268 } 3269 /* now do the commit - no errors should happen here */ 3270 for (i = 0; i < io->size; i++) { 3271#ifdef __FreeBSD__ 3272 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3273 if (error) { 3274#else 3275 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3276#endif 3277 free(table, M_TEMP); 3278 free(ioe, M_TEMP); 3279 error = EFAULT; 3280 goto fail; 3281 } 3282 switch (ioe->rs_num) { 3283#ifdef ALTQ 3284 case 
PF_RULESET_ALTQ: 3285 if ((error = pf_commit_altq(ioe->ticket))) { 3286 free(table, M_TEMP); 3287 free(ioe, M_TEMP); 3288 goto fail; /* really bad */ 3289 } 3290 break; 3291#endif /* ALTQ */ 3292 case PF_RULESET_TABLE: 3293 bzero(table, sizeof(*table)); 3294 strlcpy(table->pfrt_anchor, ioe->anchor, 3295 sizeof(table->pfrt_anchor)); 3296 if ((error = pfr_ina_commit(table, ioe->ticket, 3297 NULL, NULL, 0))) { 3298 free(table, M_TEMP); 3299 free(ioe, M_TEMP); 3300 goto fail; /* really bad */ 3301 } 3302 break; 3303 default: 3304 if ((error = pf_commit_rules(ioe->ticket, 3305 ioe->rs_num, ioe->anchor))) { 3306 free(table, M_TEMP); 3307 free(ioe, M_TEMP); 3308 goto fail; /* really bad */ 3309 } 3310 break; 3311 } 3312 } 3313 free(table, M_TEMP); 3314 free(ioe, M_TEMP); 3315 break; 3316 } 3317 3318 case DIOCGETSRCNODES: { 3319 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3320 struct pf_src_node *n, *p, *pstore; 3321 u_int32_t nr = 0; 3322 int space = psn->psn_len; 3323 3324 if (space == 0) { 3325 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3326 nr++; 3327 psn->psn_len = sizeof(struct pf_src_node) * nr; 3328 break; 3329 } 3330 3331#ifdef __FreeBSD__ 3332 PF_UNLOCK(); 3333#endif 3334 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3335#ifdef __FreeBSD__ 3336 PF_LOCK(); 3337#endif 3338 3339 p = psn->psn_src_nodes; 3340 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3341 int secs = time_second, diff; 3342 3343 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3344 break; 3345 3346 bcopy(n, pstore, sizeof(*pstore)); 3347 if (n->rule.ptr != NULL) 3348 pstore->rule.nr = n->rule.ptr->nr; 3349 pstore->creation = secs - pstore->creation; 3350 if (pstore->expire > secs) 3351 pstore->expire -= secs; 3352 else 3353 pstore->expire = 0; 3354 3355 /* adjust the connection rate estimate */ 3356 diff = secs - n->conn_rate.last; 3357 if (diff >= n->conn_rate.seconds) 3358 pstore->conn_rate.count = 0; 3359 else 3360 pstore->conn_rate.count -= 3361 
n->conn_rate.count * diff / 3362 n->conn_rate.seconds; 3363 3364#ifdef __FreeBSD__ 3365 PF_COPYOUT(pstore, p, sizeof(*p), error); 3366#else 3367 error = copyout(pstore, p, sizeof(*p)); 3368#endif 3369 if (error) { 3370 free(pstore, M_TEMP); 3371 goto fail; 3372 } 3373 p++; 3374 nr++; 3375 } 3376 psn->psn_len = sizeof(struct pf_src_node) * nr; 3377 3378 free(pstore, M_TEMP); 3379 break; 3380 } 3381 3382 case DIOCCLRSRCNODES: { 3383 struct pf_src_node *n; 3384 struct pf_state *state; 3385 3386 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3387 state->src_node = NULL; 3388 state->nat_src_node = NULL; 3389 } 3390 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3391 n->expire = 1; 3392 n->states = 0; 3393 } 3394 pf_purge_expired_src_nodes(1); 3395 pf_status.src_nodes = 0; 3396 break; 3397 } 3398 3399 case DIOCKILLSRCNODES: { 3400 struct pf_src_node *sn; 3401 struct pf_state *s; 3402 struct pfioc_src_node_kill *psnk = \ 3403 (struct pfioc_src_node_kill *) addr; 3404 int killed = 0; 3405 3406 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 3407 if (PF_MATCHA(psnk->psnk_src.neg, \ 3408 &psnk->psnk_src.addr.v.a.addr, \ 3409 &psnk->psnk_src.addr.v.a.mask, \ 3410 &sn->addr, sn->af) && 3411 PF_MATCHA(psnk->psnk_dst.neg, \ 3412 &psnk->psnk_dst.addr.v.a.addr, \ 3413 &psnk->psnk_dst.addr.v.a.mask, \ 3414 &sn->raddr, sn->af)) { 3415 /* Handle state to src_node linkage */ 3416 if (sn->states != 0) { 3417 RB_FOREACH(s, pf_state_tree_id, 3418 &tree_id) { 3419 if (s->src_node == sn) 3420 s->src_node = NULL; 3421 if (s->nat_src_node == sn) 3422 s->nat_src_node = NULL; 3423 } 3424 sn->states = 0; 3425 } 3426 sn->expire = 1; 3427 killed++; 3428 } 3429 } 3430 3431 if (killed > 0) 3432 pf_purge_expired_src_nodes(1); 3433 3434 psnk->psnk_af = killed; 3435 break; 3436 } 3437 3438 case DIOCSETHOSTID: { 3439 u_int32_t *hostid = (u_int32_t *)addr; 3440 3441 if (*hostid == 0) 3442 pf_status.hostid = arc4random(); 3443 else 3444 pf_status.hostid = *hostid; 3445 break; 3446 } 3447 3448 case 
DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
#ifdef __FreeBSD__
	PF_UNLOCK();

	if (flags & FWRITE)
		sx_xunlock(&pf_consistency_lock);
	else
		sx_sunlock(&pf_consistency_lock);
#else
	splx(s);
	/* XXX: Lock order? */
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
#endif
	return (error);
}

#ifdef __FreeBSD__
/*
 * XXX - Check for version mismatch!!!
 */
/*
 * Expire all states immediately: each state's timeout is forced to
 * PFTM_PURGE and the state is unlinked from the state trees.
 */
static void
pf_clear_states(void)
{
	struct pf_state	*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
		pf_unlink_state(state);
	}

#if 0	/* NPFSYNC */
/*
 * XXX This is called on module unload, we do not want to sync that over? */
 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Flush all tables by handing an all-zero pfioc_table filter to
 * pfr_clr_tables(); returns the pfr error code.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Detach every state from its source nodes, then mark all source
 * nodes for immediate expiry (expire = 1, states = 0).
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;
		n->states = 0;
	}
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	pf_status.running = 0;
	do {
		/* Open an inactive ruleset transaction per ruleset type... */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback?
*/ 3596 } 3597 3598 /* XXX: these should always succeed here */ 3599 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 3600 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 3601 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 3602 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 3603 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 3604 3605 if ((error = pf_clear_tables()) != 0) 3606 break; 3607 3608#ifdef ALTQ 3609 if ((error = pf_begin_altq(&t[0])) != 0) { 3610 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 3611 break; 3612 } 3613 pf_commit_altq(t[0]); 3614#endif 3615 3616 pf_clear_states(); 3617 3618 pf_clear_srcnodes(); 3619 3620 /* status does not use malloced mem so no need to cleanup */ 3621 /* fingerprints and interfaces have thier own cleanup code */ 3622 } while(0); 3623 3624 return (error); 3625} 3626 3627#ifdef INET 3628static int 3629pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3630 struct inpcb *inp) 3631{ 3632 /* 3633 * XXX Wed Jul 9 22:03:16 2003 UTC 3634 * OpenBSD has changed its byte ordering convention on ip_len/ip_off 3635 * in network stack. OpenBSD's network stack have converted 3636 * ip_len/ip_off to host byte order frist as FreeBSD. 3637 * Now this is not true anymore , so we should convert back to network 3638 * byte order. 3639 */ 3640 struct ip *h = NULL; 3641 int chk; 3642 3643 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) { 3644 /* if m_pkthdr.len is less than ip header, pf will handle. 
 */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	/* Non-zero verdict with a surviving mbuf: pf blocked it; drop. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv4: finalize any delayed checksum, swap
 * ip_len/ip_off to network order for pf_test(PF_OUT), and restore
 * host order afterwards.  Blocked packets are freed and *m zeroed.
 * Returns the pf_test() verdict.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in the network stack.  OpenBSD's network stack used to convert
	 * ip_len/ip_off to host byte order first, as FreeBSD does.
	 * Now this is not true anymore, so we should convert back to network
	 * byte order.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper checksum before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	/* Non-zero verdict with a surviving mbuf: pf blocked it; drop. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}
#endif

#ifdef INET6
/*
 * pfil(9) input hook for IPv6.  No ip_len/ip_off byte-order fixup is
 * needed here (that is an IPv4-only issue).  Blocked packets are
 * freed and *m zeroed.  Returns the pf_test6() verdict.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses.  In order to support stateful
	 * filtering we have changed this to lo0 as it is the case in IPv4.
	 */
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
	    NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv6: finalize any delayed checksum and run
 * the packet through pf_test6(PF_OUT).  Blocked packets are freed and
 * *m zeroed.  Returns the pf_test6() verdict.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
#ifdef INET
		/* XXX-BZ copy&paste error from r126261? */
		in_delayed_cksum(*m);
#endif
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Install the pf_check* pfil(9) hooks on the AF_INET (and, if built,
 * AF_INET6) pfil heads.  Idempotent: returns 0 immediately when the
 * hooks are already installed (pf_pfil_hooked set).  On a missing
 * pfil head, already-installed hooks are backed out and ESRCH is
 * returned.  Must be called without the PF lock (PF_ASSERT
 * MA_NOTOWNED) because the hooks are added with PFIL_WAITOK.
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH);	/* XXX */
#ifdef INET
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* Undo the IPv4 hooks so we fail cleanly. */
#ifdef INET
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH);	/* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Remove the pf pfil(9) hooks installed by hook_pf().  Idempotent:
 * returns 0 immediately when nothing is hooked.  Returns ESRCH if a
 * pfil head has vanished.  Like hook_pf(), must be called without the
 * PF lock held (PFIL_WAITOK removals may sleep).
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH);	/* XXX */
#ifdef INET
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH);	/* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: set up zones and the PF mutex, create /dev/pf and
 * attach pf.  On pfattach() failure the device and mutex are torn
 * down again and ENOMEM is returned.
 */
static int
pf_load(void)
{
	init_zone_var();
	init_pf_mutex();
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pfattach() < 0) {
		destroy_dev(pf_dev);
		destroy_pf_mutex();
		return (ENOMEM);
	}
	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, unhook from pfil(9), flush all state
 * via shutdown_pf(), stop the purge thread, and release all
 * resources.  Returns 0 on success or the dehook_pf() error (in
 * which case the module stays loaded).
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	pf_status.running = 0;
	PF_UNLOCK();
	/* Must unhook without the PF lock held. */
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	/*
	 * Ask the purge thread to exit and wait until it acknowledges
	 * (it is expected to advance pf_end_threads to 2 on its way
	 * out); re-check every second (hz) in case a wakeup is missed.
	 */
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz);
	}
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_dev(pf_dev);
	destroy_pf_mutex();
	return error;
}

/*
 * module(9) event handler dispatching MOD_LOAD/MOD_UNLOAD to
 * pf_load()/pf_unload(); any other event yields EINVAL.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/* module(9) registration: name, event handler, no private data. */
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */