pf_ioctl.c revision 185571
1/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */ 2 3/* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
35 * 36 */ 37 38#ifdef __FreeBSD__ 39#include "opt_inet.h" 40#include "opt_inet6.h" 41 42#include <sys/cdefs.h> 43__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 185571 2008-12-02 21:37:28Z bz $"); 44#endif 45 46#ifdef __FreeBSD__ 47#include "opt_bpf.h" 48#include "opt_pf.h" 49 50#ifdef DEV_BPF 51#define NBPFILTER DEV_BPF 52#else 53#define NBPFILTER 0 54#endif 55 56#ifdef DEV_PFLOG 57#define NPFLOG DEV_PFLOG 58#else 59#define NPFLOG 0 60#endif 61 62#ifdef DEV_PFSYNC 63#define NPFSYNC DEV_PFSYNC 64#else 65#define NPFSYNC 0 66#endif 67 68#else 69#include "bpfilter.h" 70#include "pflog.h" 71#include "pfsync.h" 72#endif 73 74#include <sys/param.h> 75#include <sys/systm.h> 76#include <sys/mbuf.h> 77#include <sys/filio.h> 78#include <sys/fcntl.h> 79#include <sys/socket.h> 80#include <sys/socketvar.h> 81#include <sys/kernel.h> 82#include <sys/time.h> 83#include <sys/malloc.h> 84#ifdef __FreeBSD__ 85#include <sys/module.h> 86#include <sys/conf.h> 87#include <sys/proc.h> 88#include <sys/sysctl.h> 89#include <sys/vimage.h> 90#else 91#include <sys/timeout.h> 92#include <sys/pool.h> 93#endif 94#include <sys/proc.h> 95#include <sys/malloc.h> 96#include <sys/kthread.h> 97#ifndef __FreeBSD__ 98#include <sys/rwlock.h> 99#include <uvm/uvm_extern.h> 100#endif 101 102#include <net/if.h> 103#include <net/if_types.h> 104#include <net/route.h> 105#ifdef __FreeBSD__ 106#include <net/vnet.h> 107#endif 108 109#include <netinet/in.h> 110#include <netinet/in_var.h> 111#include <netinet/in_systm.h> 112#include <netinet/ip.h> 113#include <netinet/ip_var.h> 114#include <netinet/ip_icmp.h> 115 116#ifdef __FreeBSD__ 117#include <sys/md5.h> 118#else 119#include <dev/rndvar.h> 120#include <crypto/md5.h> 121#endif 122#include <net/pfvar.h> 123 124#if NPFSYNC > 0 125#include <net/if_pfsync.h> 126#endif /* NPFSYNC > 0 */ 127 128#include <net/if_pflog.h> 129 130#ifdef INET6 131#include <netinet/ip6.h> 132#include <netinet/in_pcb.h> 133#endif /* INET6 */ 134 135#ifdef ALTQ 136#include 
<altq/altq.h> 137#endif 138 139#ifdef __FreeBSD__ 140#include <sys/limits.h> 141#include <sys/lock.h> 142#include <sys/mutex.h> 143#include <net/pfil.h> 144#endif /* __FreeBSD__ */ 145 146#ifdef __FreeBSD__ 147void init_zone_var(void); 148void cleanup_pf_zone(void); 149int pfattach(void); 150#else 151void pfattach(int); 152void pf_thread_create(void *); 153int pfopen(dev_t, int, int, struct proc *); 154int pfclose(dev_t, int, int, struct proc *); 155#endif 156struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 157 u_int8_t, u_int8_t, u_int8_t); 158 159void pf_mv_pool(struct pf_palist *, struct pf_palist *); 160void pf_empty_pool(struct pf_palist *); 161#ifdef __FreeBSD__ 162int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 163#else 164int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 165#endif 166#ifdef ALTQ 167int pf_begin_altq(u_int32_t *); 168int pf_rollback_altq(u_int32_t); 169int pf_commit_altq(u_int32_t); 170int pf_enable_altq(struct pf_altq *); 171int pf_disable_altq(struct pf_altq *); 172#endif /* ALTQ */ 173int pf_begin_rules(u_int32_t *, int, const char *); 174int pf_rollback_rules(u_int32_t, int, char *); 175int pf_setup_pfsync_matching(struct pf_ruleset *); 176void pf_hash_rule(MD5_CTX *, struct pf_rule *); 177void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 178int pf_commit_rules(u_int32_t, int, char *); 179 180struct pf_rule pf_default_rule; 181#ifdef __FreeBSD__ 182struct sx pf_consistency_lock; 183SX_SYSINIT(pf_consistency_lock, &pf_consistency_lock, "pf_statetbl_lock"); 184#else 185struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER; 186#endif 187#ifdef ALTQ 188static int pf_altq_running; 189#endif 190 191#define TAGID_MAX 50000 192TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 193 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 194 195#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 196#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 197#endif 198u_int16_t 
tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf gated on the global pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x


#ifdef __FreeBSD__
static struct cdev	*pf_dev;

/*
 * XXX - These are new and need to be checked when moveing to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(void);
/*
 * XXX - These are new and need to be checked when moveing to a new version
 */

/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
		int dir, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
		int dir, struct inpcb *inp);
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
		int dir, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
		int dir, struct inpcb *inp);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

/* /dev/pf character device; only the ioctl entry point is implemented. */
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

/* Nonzero once the pfil(9) hooks are installed (see hook_pf/dehook_pf). */
static volatile int pf_pfil_hooked = 0;
int pf_end_threads = 0;
struct mtx pf_task_mtx;
pflog_packet_t *pflog_packet_ptr = NULL;

int debug_pfugidhack = 0;
SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
    "Enable/disable pf user/group rules mpsafe hack");

/* Create the global pf task mutex (called at module load). */
void
init_pf_mutex(void)
{
	mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
}

/* Destroy the global pf task mutex (called at module unload). */
void
destroy_pf_mutex(void)
{
	mtx_destroy(&pf_task_mtx);
}

/* Reset all UMA zone pointers so a failed attach can be cleaned up safely. */
void
init_zone_var(void)
{
	pf_src_tree_pl = pf_rule_pl = NULL;
	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
	pf_state_scrub_pl = NULL;
	pfr_ktable_pl = pfr_kentry_pl = NULL;
}

/*
 * Tear down every UMA zone created by pfattach().  UMA_DESTROY is assumed
 * to tolerate a NULL zone pointer so this is safe on partial failure.
 */
void
cleanup_pf_zone(void)
{
	UMA_DESTROY(pf_src_tree_pl);
	UMA_DESTROY(pf_rule_pl);
	UMA_DESTROY(pf_state_pl);
	UMA_DESTROY(pf_altq_pl);
	UMA_DESTROY(pf_pooladdr_pl);
	UMA_DESTROY(pf_frent_pl);
	UMA_DESTROY(pf_frag_pl);
	UMA_DESTROY(pf_cache_pl);
	UMA_DESTROY(pf_cent_pl);
	UMA_DESTROY(pfr_ktable_pl);
	UMA_DESTROY(pfr_kentry_pl2);
	UMA_DESTROY(pfr_kentry_pl);
	UMA_DESTROY(pf_state_scrub_pl);
	UMA_DESTROY(pfi_addr_pl);
}

/*
 * FreeBSD attach: create all UMA zones, initialize the table/interface/
 * osfp subsystems, set pool limits, install the default (pass) rule and
 * default timeouts, and start the purge thread.
 * Returns 0 on success or an errno on failure (zones cleaned up).
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	/*
	 * do/while(0) so UMA_CREATE can 'break' out on failure; on success
	 * the final statement clears error.
	 */
	do {
		UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
		UMA_CREATE(pf_rule_pl,	  struct pf_rule, "pfrulepl");
		UMA_CREATE(pf_state_pl,	  struct pf_state, "pfstatepl");
		UMA_CREATE(pf_altq_pl,	  struct pf_altq, "pfaltqpl");
		UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		UMA_CREATE(pfr_ktable_pl,  struct pfr_ktable, "pfrktable");
		UMA_CREATE(pfr_kentry_pl,  struct pfr_kentry, "pfrkentry");
		UMA_CREATE(pfr_kentry_pl2,  struct pfr_kentry, "pfrkentry2");
		UMA_CREATE(pf_frent_pl,	  struct pf_frent, "pffrent");
		UMA_CREATE(pf_frag_pl,	  struct pf_fragment, "pffrag");
		UMA_CREATE(pf_cache_pl,	  struct pf_fragment, "pffrcache");
		UMA_CREATE(pf_cent_pl,	  struct pf_frcache, "pffrcent");
		UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	if ( (error = pf_osfp_initialize()) ) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* Hard limits for the user-tunable pools (DIOCSETLIMIT). */
	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
	pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	pf_pool_limits[PF_LIMIT_TABLES].pp = pfr_ktable_pl;
	pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = pfr_kentry_pl;
	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
	/* Only the state zone gets its cap enforced by UMA here. */
	uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* pfil hooks are installed later, via DIOCSTART/hook_pf. */
	pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	if (kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pfpurge"))
		return (ENXIO);

	return (error);
}
#else /* !__FreeBSD__ */
/*
 * OpenBSD attach: same overall setup as the FreeBSD path but uses
 * pool(9) instead of UMA and defers purge-thread creation until a
 * process context is available.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on machines with <= 100MB of RAM */
	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kproc_create_deferred(pf_thread_create, NULL);
}

/* Deferred creator for the state-purge kernel thread. */
void
pf_thread_create(void *v)
{
	if (kproc_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}

/* Open /dev/pf; only unit 0 exists. */
int
pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
{
	if (dev2unit(dev) >= 1)
		return (ENXIO);
	return (0);
}

/* Close /dev/pf; only unit 0 exists. */
int
pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
{
	if (dev2unit(dev) >= 1)
		return (ENXIO);
	return (0);
}
#endif /* __FreeBSD__ */

/*
 * Look up the address pool of a rule identified by (anchor, action,
 * rule number) in either the active or inactive ruleset.  When
 * check_ticket is set the caller's ticket must match the ruleset's,
 * otherwise NULL is returned (stale transaction).  r_last selects the
 * last rule in the queue instead of matching rule_number.
 * Returns a pointer to the rule's rpool, or NULL if not found.
 */
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* linear scan for the rule with the requested number */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/* Move every pool address from poola to the tail of poolb (FIFO order). */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/*
 * Drain a pool address list, dropping each entry's dynaddr/table
 * references before freeing it back to the pooladdr pool.
 */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

/*
 * Unlink a rule from its queue (if rulequeue != NULL) and free it once
 * nothing references it.  A rule still holding states or source nodes is
 * only unlinked (nr = -1, tqe_prev = NULL mark it removed); the final
 * free happens on a later call with rulequeue == NULL when the last
 * reference drains.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced or still linked: defer the actual free */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	/* tables were already detached above when unlinking from a queue */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Map a tag name to a small numeric id, creating (refcount 1) or
 * referencing (refcount++) the entry.  Returns 0 if the name cannot be
 * allocated or all ids up to TAGID_MAX are taken.  Shared by both rule
 * tags (pf_tags) and ALTQ qids (pf_qids).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* list is kept sorted; stop at the first gap in tag ids */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/* Copy the name for tag id 'tagid' into p; p is untouched if not found. */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* Drop one reference on a tag id, freeing the entry at refcount zero. */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

/* Rule-tag wrappers over the generic tag table above. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

/* Take an extra reference on an existing tag id (no-op if unknown). */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

/*
 * Resolve a rule's route-label name to an id.  Not implemented on
 * FreeBSD (no rtlabel support); always succeeds there.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	return
	(0);
#else
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
#endif
}

/* Release a rule's route-label reference (no-op on FreeBSD). */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
#else
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}

/*
 * Fill in the textual route-label name before copying a rule out to
 * userland; "?" is substituted when the id cannot be resolved.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
		strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
#else
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}

#ifdef ALTQ
/* Queue-name <-> qid mapping reuses the generic tag table (pf_qids). */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

/*
 * Begin an ALTQ transaction: discard any leftover inactive queue
 * definitions and hand the caller a fresh ticket.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
#ifdef __FreeBSD__
		/* qname[0] == 0 means an interface-level queue definition */
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

/*
 * Abort an ALTQ transaction: drop the inactive queue list if the
 * caller's ticket still matches the open transaction.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
#ifdef __FreeBSD__
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

/*
 * Commit an ALTQ transaction: swap the inactive list in as the active
 * one, attach/enable the new disciplines, then tear down the old set.
 * Runs at splsoftnet to keep the swap atomic w.r.t. the forwarding path.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
#ifdef __FreeBSD__
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
		if (altq->qname[0] == 0) {
#endif
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
#ifdef __FreeBSD__
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			/* remember the first error but keep tearing down */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

/*
 * Enable the queueing discipline on the interface named by the altq and
 * program its token-bucket regulator.  The pf lock is dropped around
 * tbr_set() on FreeBSD (it may sleep — TODO confirm the exact reason).
 */
int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

/*
 * Disable the discipline referenced by the altq and clear the
 * token-bucket regulator (rate 0).
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

#ifdef __FreeBSD__
/*
 * React to an interface arrival/departure: rebuild the active ALTQ set,
 * marking queues on a departed (or missing) interface with
 * PFALTQ_FLAG_IF_REMOVED instead of attaching them.  Uses the normal
 * begin/rollback/commit transaction machinery.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
	if (altqs_inactive_open)
		pf_rollback_altq(ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, pf_altqs_active, entries) {
		a2 = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, a2);
				break;
			}
			/* re-link to the copied interface-level discipline */
			a2->altq_disc = NULL;
			TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			PF_UNLOCK();
			error = altq_add(a2);
			PF_LOCK();

			/* transaction may have been stolen while unlocked */
			if (ticket != ticket_altqs_inactive)
				error = EBUSY;

			if (error) {
				pool_put(&pf_altq_pl, a2);
				break;
			}
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif
#endif /* ALTQ */

/*
 * Begin a rule transaction for one ruleset type (rs_num) in the given
 * anchor: flush the inactive rule list and return a fresh ticket.
 * Returns EINVAL on a bad rs_num or unresolvable anchor.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort a rule transaction: flush the inactive list if the ticket still
 * matches the open transaction; silently succeeds otherwise.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Helpers for folding rule fields into the pfsync ruleset checksum.
 * Multi-byte integers are converted to network order first so the
 * digest is byte-order independent across pfsync peers.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/*
 * Fold one rule address (src or dst) into the checksum; only the fields
 * relevant to the address's type are hashed.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * Fold a whole rule into the pfsync checksum.  The field list and order
 * must stay identical on every pfsync peer, or their ruleset checksums
 * will never match.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	 x;	/* scratch for htons conversions */
	u_int32_t	 y;	/* scratch for htonl conversions */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit a rule transaction: atomically (at splsoftnet) swap the
 * inactive ruleset in as the active one, recompute skip steps, and
 * purge the previously active rules.  For the main ruleset the pfsync
 * checksum is recomputed first.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
1172 u_int32_t old_rcount; 1173 1174 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1175 return (EINVAL); 1176 rs = pf_find_ruleset(anchor); 1177 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1178 ticket != rs->rules[rs_num].inactive.ticket) 1179 return (EBUSY); 1180 1181 /* Calculate checksum for the main ruleset */ 1182 if (rs == &pf_main_ruleset) { 1183 error = pf_setup_pfsync_matching(rs); 1184 if (error != 0) 1185 return (error); 1186 } 1187 1188 /* Swap rules, keep the old. */ 1189 s = splsoftnet(); 1190 old_rules = rs->rules[rs_num].active.ptr; 1191 old_rcount = rs->rules[rs_num].active.rcount; 1192 old_array = rs->rules[rs_num].active.ptr_array; 1193 1194 rs->rules[rs_num].active.ptr = 1195 rs->rules[rs_num].inactive.ptr; 1196 rs->rules[rs_num].active.ptr_array = 1197 rs->rules[rs_num].inactive.ptr_array; 1198 rs->rules[rs_num].active.rcount = 1199 rs->rules[rs_num].inactive.rcount; 1200 rs->rules[rs_num].inactive.ptr = old_rules; 1201 rs->rules[rs_num].inactive.ptr_array = old_array; 1202 rs->rules[rs_num].inactive.rcount = old_rcount; 1203 1204 rs->rules[rs_num].active.ticket = 1205 rs->rules[rs_num].inactive.ticket; 1206 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); 1207 1208 1209 /* Purge the old rule list. */ 1210 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 1211 pf_rm_rule(old_rules, rule); 1212 if (rs->rules[rs_num].inactive.ptr_array) 1213 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); 1214 rs->rules[rs_num].inactive.ptr_array = NULL; 1215 rs->rules[rs_num].inactive.rcount = 0; 1216 rs->rules[rs_num].inactive.open = 0; 1217 pf_remove_if_empty_ruleset(rs); 1218 splx(s); 1219 return (0); 1220} 1221 1222int 1223pf_setup_pfsync_matching(struct pf_ruleset *rs) 1224{ 1225 MD5_CTX ctx; 1226 struct pf_rule *rule; 1227 int rs_cnt; 1228 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 1229 1230 MD5Init(&ctx); 1231 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { 1232 /* XXX PF_RULESET_SCRUB as well? 
*/ 1233 if (rs_cnt == PF_RULESET_SCRUB) 1234 continue; 1235 1236 if (rs->rules[rs_cnt].inactive.ptr_array) 1237 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1238 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1239 1240 if (rs->rules[rs_cnt].inactive.rcount) { 1241 rs->rules[rs_cnt].inactive.ptr_array = 1242 malloc(sizeof(caddr_t) * 1243 rs->rules[rs_cnt].inactive.rcount, 1244 M_TEMP, M_NOWAIT); 1245 1246 if (!rs->rules[rs_cnt].inactive.ptr_array) 1247 return (ENOMEM); 1248 } 1249 1250 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1251 entries) { 1252 pf_hash_rule(&ctx, rule); 1253 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1254 } 1255 } 1256 1257 MD5Final(digest, &ctx); 1258 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 1259 return (0); 1260} 1261 1262int 1263#ifdef __FreeBSD__ 1264pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1265#else 1266pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 1267#endif 1268{ 1269 struct pf_pooladdr *pa = NULL; 1270 struct pf_pool *pool = NULL; 1271#ifndef __FreeBSD__ 1272 int s; 1273#endif 1274 int error = 0; 1275 1276 /* XXX keep in sync with switch() below */ 1277#ifdef __FreeBSD__ 1278 if (securelevel_gt(td->td_ucred, 2)) 1279#else 1280 if (securelevel > 1) 1281#endif 1282 switch (cmd) { 1283 case DIOCGETRULES: 1284 case DIOCGETRULE: 1285 case DIOCGETADDRS: 1286 case DIOCGETADDR: 1287 case DIOCGETSTATE: 1288 case DIOCSETSTATUSIF: 1289 case DIOCGETSTATUS: 1290 case DIOCCLRSTATUS: 1291 case DIOCNATLOOK: 1292 case DIOCSETDEBUG: 1293 case DIOCGETSTATES: 1294 case DIOCGETTIMEOUT: 1295 case DIOCCLRRULECTRS: 1296 case DIOCGETLIMIT: 1297 case DIOCGETALTQS: 1298 case DIOCGETALTQ: 1299 case DIOCGETQSTATS: 1300 case DIOCGETRULESETS: 1301 case DIOCGETRULESET: 1302 case DIOCRGETTABLES: 1303 case DIOCRGETTSTATS: 1304 case DIOCRCLRTSTATS: 1305 case DIOCRCLRADDRS: 1306 case DIOCRADDADDRS: 1307 case DIOCRDELADDRS: 1308 case DIOCRSETADDRS: 1309 
case DIOCRGETADDRS: 1310 case DIOCRGETASTATS: 1311 case DIOCRCLRASTATS: 1312 case DIOCRTSTADDRS: 1313 case DIOCOSFPGET: 1314 case DIOCGETSRCNODES: 1315 case DIOCCLRSRCNODES: 1316 case DIOCIGETIFACES: 1317#ifdef __FreeBSD__ 1318 case DIOCGIFSPEED: 1319#endif 1320 case DIOCSETIFFLAG: 1321 case DIOCCLRIFFLAG: 1322 break; 1323 case DIOCRCLRTABLES: 1324 case DIOCRADDTABLES: 1325 case DIOCRDELTABLES: 1326 case DIOCRSETTFLAGS: 1327 if (((struct pfioc_table *)addr)->pfrio_flags & 1328 PFR_FLAG_DUMMY) 1329 break; /* dummy operation ok */ 1330 return (EPERM); 1331 default: 1332 return (EPERM); 1333 } 1334 1335 if (!(flags & FWRITE)) 1336 switch (cmd) { 1337 case DIOCGETRULES: 1338 case DIOCGETADDRS: 1339 case DIOCGETADDR: 1340 case DIOCGETSTATE: 1341 case DIOCGETSTATUS: 1342 case DIOCGETSTATES: 1343 case DIOCGETTIMEOUT: 1344 case DIOCGETLIMIT: 1345 case DIOCGETALTQS: 1346 case DIOCGETALTQ: 1347 case DIOCGETQSTATS: 1348 case DIOCGETRULESETS: 1349 case DIOCGETRULESET: 1350 case DIOCNATLOOK: 1351 case DIOCRGETTABLES: 1352 case DIOCRGETTSTATS: 1353 case DIOCRGETADDRS: 1354 case DIOCRGETASTATS: 1355 case DIOCRTSTADDRS: 1356 case DIOCOSFPGET: 1357 case DIOCGETSRCNODES: 1358 case DIOCIGETIFACES: 1359#ifdef __FreeBSD__ 1360 case DIOCGIFSPEED: 1361#endif 1362 break; 1363 case DIOCRCLRTABLES: 1364 case DIOCRADDTABLES: 1365 case DIOCRDELTABLES: 1366 case DIOCRCLRTSTATS: 1367 case DIOCRCLRADDRS: 1368 case DIOCRADDADDRS: 1369 case DIOCRDELADDRS: 1370 case DIOCRSETADDRS: 1371 case DIOCRSETTFLAGS: 1372 if (((struct pfioc_table *)addr)->pfrio_flags & 1373 PFR_FLAG_DUMMY) { 1374 flags |= FWRITE; /* need write lock for dummy */ 1375 break; /* dummy operation ok */ 1376 } 1377 return (EACCES); 1378 case DIOCGETRULE: 1379 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR) 1380 return (EACCES); 1381 break; 1382 default: 1383 return (EACCES); 1384 } 1385 1386 if (flags & FWRITE) 1387#ifdef __FreeBSD__ 1388 sx_xlock(&pf_consistency_lock); 1389 else 1390 sx_slock(&pf_consistency_lock); 
1391#else 1392 rw_enter_write(&pf_consistency_lock); 1393 else 1394 rw_enter_read(&pf_consistency_lock); 1395#endif 1396 1397#ifdef __FreeBSD__ 1398 PF_LOCK(); 1399#else 1400 s = splsoftnet(); 1401#endif 1402 switch (cmd) { 1403 1404 case DIOCSTART: 1405 if (pf_status.running) 1406 error = EEXIST; 1407 else { 1408#ifdef __FreeBSD__ 1409 PF_UNLOCK(); 1410 error = hook_pf(); 1411 PF_LOCK(); 1412 if (error) { 1413 DPFPRINTF(PF_DEBUG_MISC, 1414 ("pf: pfil registeration fail\n")); 1415 break; 1416 } 1417#endif 1418 pf_status.running = 1; 1419 pf_status.since = time_second; 1420 if (pf_status.stateid == 0) { 1421 pf_status.stateid = time_second; 1422 pf_status.stateid = pf_status.stateid << 32; 1423 } 1424 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1425 } 1426 break; 1427 1428 case DIOCSTOP: 1429 if (!pf_status.running) 1430 error = ENOENT; 1431 else { 1432 pf_status.running = 0; 1433#ifdef __FreeBSD__ 1434 PF_UNLOCK(); 1435 error = dehook_pf(); 1436 PF_LOCK(); 1437 if (error) { 1438 pf_status.running = 1; 1439 DPFPRINTF(PF_DEBUG_MISC, 1440 ("pf: pfil unregisteration failed\n")); 1441 } 1442#endif 1443 pf_status.since = time_second; 1444 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1445 } 1446 break; 1447 1448 case DIOCADDRULE: { 1449 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1450 struct pf_ruleset *ruleset; 1451 struct pf_rule *rule, *tail; 1452 struct pf_pooladdr *pa; 1453 int rs_num; 1454 1455 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1456 ruleset = pf_find_ruleset(pr->anchor); 1457 if (ruleset == NULL) { 1458 error = EINVAL; 1459 break; 1460 } 1461 rs_num = pf_get_ruleset_number(pr->rule.action); 1462 if (rs_num >= PF_RULESET_MAX) { 1463 error = EINVAL; 1464 break; 1465 } 1466 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1467 error = EINVAL; 1468 break; 1469 } 1470 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1471#ifdef __FreeBSD__ 1472 DPFPRINTF(PF_DEBUG_MISC, 1473 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1474 
ruleset->rules[rs_num].inactive.ticket)); 1475#endif 1476 error = EBUSY; 1477 break; 1478 } 1479 if (pr->pool_ticket != ticket_pabuf) { 1480#ifdef __FreeBSD__ 1481 DPFPRINTF(PF_DEBUG_MISC, 1482 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1483 ticket_pabuf)); 1484#endif 1485 error = EBUSY; 1486 break; 1487 } 1488 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1489 if (rule == NULL) { 1490 error = ENOMEM; 1491 break; 1492 } 1493 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1494#ifdef __FreeBSD__ 1495 rule->cuid = td->td_ucred->cr_ruid; 1496 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1497#else 1498 rule->cuid = p->p_cred->p_ruid; 1499 rule->cpid = p->p_pid; 1500#endif 1501 rule->anchor = NULL; 1502 rule->kif = NULL; 1503 TAILQ_INIT(&rule->rpool.list); 1504 /* initialize refcounting */ 1505 rule->states = 0; 1506 rule->src_nodes = 0; 1507 rule->entries.tqe_prev = NULL; 1508#ifndef INET 1509 if (rule->af == AF_INET) { 1510 pool_put(&pf_rule_pl, rule); 1511 error = EAFNOSUPPORT; 1512 break; 1513 } 1514#endif /* INET */ 1515#ifndef INET6 1516 if (rule->af == AF_INET6) { 1517 pool_put(&pf_rule_pl, rule); 1518 error = EAFNOSUPPORT; 1519 break; 1520 } 1521#endif /* INET6 */ 1522 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1523 pf_rulequeue); 1524 if (tail) 1525 rule->nr = tail->nr + 1; 1526 else 1527 rule->nr = 0; 1528 if (rule->ifname[0]) { 1529 rule->kif = pfi_kif_get(rule->ifname); 1530 if (rule->kif == NULL) { 1531 pool_put(&pf_rule_pl, rule); 1532 error = EINVAL; 1533 break; 1534 } 1535 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1536 } 1537 1538#ifdef __FreeBSD__ /* ROUTING */ 1539 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs) 1540#else 1541 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid)) 1542#endif 1543 error = EBUSY; 1544 1545#ifdef ALTQ 1546 /* set queue IDs */ 1547 if (rule->qname[0] != 0) { 1548 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1549 error = EBUSY; 1550 else if (rule->pqname[0] != 0) { 1551 if ((rule->pqid = 1552 
pf_qname2qid(rule->pqname)) == 0) 1553 error = EBUSY; 1554 } else 1555 rule->pqid = rule->qid; 1556 } 1557#endif 1558 if (rule->tagname[0]) 1559 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1560 error = EBUSY; 1561 if (rule->match_tagname[0]) 1562 if ((rule->match_tag = 1563 pf_tagname2tag(rule->match_tagname)) == 0) 1564 error = EBUSY; 1565 if (rule->rt && !rule->direction) 1566 error = EINVAL; 1567#if NPFLOG > 0 1568#ifdef __FreeBSD__ 1569 if (!rule->log) 1570 rule->logif = 0; 1571#endif 1572 if (rule->logif >= PFLOGIFS_MAX) 1573 error = EINVAL; 1574#endif 1575 if (pf_rtlabel_add(&rule->src.addr) || 1576 pf_rtlabel_add(&rule->dst.addr)) 1577 error = EBUSY; 1578 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1579 error = EINVAL; 1580 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1581 error = EINVAL; 1582 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1583 error = EINVAL; 1584 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1585 error = EINVAL; 1586 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1587 error = EINVAL; 1588 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1589 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1590 error = EINVAL; 1591 1592 if (rule->overload_tblname[0]) { 1593 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1594 rule->overload_tblname)) == NULL) 1595 error = EINVAL; 1596 else 1597 rule->overload_tbl->pfrkt_flags |= 1598 PFR_TFLAG_ACTIVE; 1599 } 1600 1601 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1602 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1603 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1604 (rule->rt > PF_FASTROUTE)) && 1605 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1606 error = EINVAL; 1607 1608 if (error) { 1609 pf_rm_rule(NULL, rule); 1610 break; 1611 } 1612 1613#ifdef __FreeBSD__ 1614 if (!debug_pfugidhack && (rule->uid.op || rule->gid.op || 1615 rule->log & PF_LOG_SOCKET_LOOKUP)) { 1616 DPFPRINTF(PF_DEBUG_MISC, 1617 ("pf: debug.pfugidhack enabled\n")); 1618 debug_pfugidhack = 1; 1619 } 
1620#endif 1621 1622 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1623 rule->evaluations = rule->packets[0] = rule->packets[1] = 1624 rule->bytes[0] = rule->bytes[1] = 0; 1625 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1626 rule, entries); 1627 ruleset->rules[rs_num].inactive.rcount++; 1628 break; 1629 } 1630 1631 case DIOCGETRULES: { 1632 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1633 struct pf_ruleset *ruleset; 1634 struct pf_rule *tail; 1635 int rs_num; 1636 1637 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1638 ruleset = pf_find_ruleset(pr->anchor); 1639 if (ruleset == NULL) { 1640 error = EINVAL; 1641 break; 1642 } 1643 rs_num = pf_get_ruleset_number(pr->rule.action); 1644 if (rs_num >= PF_RULESET_MAX) { 1645 error = EINVAL; 1646 break; 1647 } 1648 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1649 pf_rulequeue); 1650 if (tail) 1651 pr->nr = tail->nr + 1; 1652 else 1653 pr->nr = 0; 1654 pr->ticket = ruleset->rules[rs_num].active.ticket; 1655 break; 1656 } 1657 1658 case DIOCGETRULE: { 1659 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1660 struct pf_ruleset *ruleset; 1661 struct pf_rule *rule; 1662 int rs_num, i; 1663 1664 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1665 ruleset = pf_find_ruleset(pr->anchor); 1666 if (ruleset == NULL) { 1667 error = EINVAL; 1668 break; 1669 } 1670 rs_num = pf_get_ruleset_number(pr->rule.action); 1671 if (rs_num >= PF_RULESET_MAX) { 1672 error = EINVAL; 1673 break; 1674 } 1675 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1676 error = EBUSY; 1677 break; 1678 } 1679 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1680 while ((rule != NULL) && (rule->nr != pr->nr)) 1681 rule = TAILQ_NEXT(rule, entries); 1682 if (rule == NULL) { 1683 error = EBUSY; 1684 break; 1685 } 1686 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1687 if (pf_anchor_copyout(ruleset, rule, pr)) { 1688 error = EBUSY; 1689 break; 1690 } 1691 pfi_dynaddr_copyout(&pr->rule.src.addr); 1692 
pfi_dynaddr_copyout(&pr->rule.dst.addr); 1693 pf_tbladdr_copyout(&pr->rule.src.addr); 1694 pf_tbladdr_copyout(&pr->rule.dst.addr); 1695 pf_rtlabel_copyout(&pr->rule.src.addr); 1696 pf_rtlabel_copyout(&pr->rule.dst.addr); 1697 for (i = 0; i < PF_SKIP_COUNT; ++i) 1698 if (rule->skip[i].ptr == NULL) 1699 pr->rule.skip[i].nr = -1; 1700 else 1701 pr->rule.skip[i].nr = 1702 rule->skip[i].ptr->nr; 1703 1704 if (pr->action == PF_GET_CLR_CNTR) { 1705 rule->evaluations = 0; 1706 rule->packets[0] = rule->packets[1] = 0; 1707 rule->bytes[0] = rule->bytes[1] = 0; 1708 } 1709 break; 1710 } 1711 1712 case DIOCCHANGERULE: { 1713 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1714 struct pf_ruleset *ruleset; 1715 struct pf_rule *oldrule = NULL, *newrule = NULL; 1716 u_int32_t nr = 0; 1717 int rs_num; 1718 1719 if (!(pcr->action == PF_CHANGE_REMOVE || 1720 pcr->action == PF_CHANGE_GET_TICKET) && 1721 pcr->pool_ticket != ticket_pabuf) { 1722 error = EBUSY; 1723 break; 1724 } 1725 1726 if (pcr->action < PF_CHANGE_ADD_HEAD || 1727 pcr->action > PF_CHANGE_GET_TICKET) { 1728 error = EINVAL; 1729 break; 1730 } 1731 ruleset = pf_find_ruleset(pcr->anchor); 1732 if (ruleset == NULL) { 1733 error = EINVAL; 1734 break; 1735 } 1736 rs_num = pf_get_ruleset_number(pcr->rule.action); 1737 if (rs_num >= PF_RULESET_MAX) { 1738 error = EINVAL; 1739 break; 1740 } 1741 1742 if (pcr->action == PF_CHANGE_GET_TICKET) { 1743 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1744 break; 1745 } else { 1746 if (pcr->ticket != 1747 ruleset->rules[rs_num].active.ticket) { 1748 error = EINVAL; 1749 break; 1750 } 1751 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1752 error = EINVAL; 1753 break; 1754 } 1755 } 1756 1757 if (pcr->action != PF_CHANGE_REMOVE) { 1758 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1759 if (newrule == NULL) { 1760 error = ENOMEM; 1761 break; 1762 } 1763 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1764#ifdef __FreeBSD__ 1765 newrule->cuid = td->td_ucred->cr_ruid; 1766 
newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1767#else 1768 newrule->cuid = p->p_cred->p_ruid; 1769 newrule->cpid = p->p_pid; 1770#endif 1771 TAILQ_INIT(&newrule->rpool.list); 1772 /* initialize refcounting */ 1773 newrule->states = 0; 1774 newrule->entries.tqe_prev = NULL; 1775#ifndef INET 1776 if (newrule->af == AF_INET) { 1777 pool_put(&pf_rule_pl, newrule); 1778 error = EAFNOSUPPORT; 1779 break; 1780 } 1781#endif /* INET */ 1782#ifndef INET6 1783 if (newrule->af == AF_INET6) { 1784 pool_put(&pf_rule_pl, newrule); 1785 error = EAFNOSUPPORT; 1786 break; 1787 } 1788#endif /* INET6 */ 1789 if (newrule->ifname[0]) { 1790 newrule->kif = pfi_kif_get(newrule->ifname); 1791 if (newrule->kif == NULL) { 1792 pool_put(&pf_rule_pl, newrule); 1793 error = EINVAL; 1794 break; 1795 } 1796 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 1797 } else 1798 newrule->kif = NULL; 1799 1800 if (newrule->rtableid > 0 && 1801#ifdef __FreeBSD__ /* ROUTING */ 1802 newrule->rtableid > rt_numfibs) 1803#else 1804 !rtable_exists(newrule->rtableid)) 1805#endif 1806 error = EBUSY; 1807 1808#ifdef ALTQ 1809 /* set queue IDs */ 1810 if (newrule->qname[0] != 0) { 1811 if ((newrule->qid = 1812 pf_qname2qid(newrule->qname)) == 0) 1813 error = EBUSY; 1814 else if (newrule->pqname[0] != 0) { 1815 if ((newrule->pqid = 1816 pf_qname2qid(newrule->pqname)) == 0) 1817 error = EBUSY; 1818 } else 1819 newrule->pqid = newrule->qid; 1820 } 1821#endif /* ALTQ */ 1822 if (newrule->tagname[0]) 1823 if ((newrule->tag = 1824 pf_tagname2tag(newrule->tagname)) == 0) 1825 error = EBUSY; 1826 if (newrule->match_tagname[0]) 1827 if ((newrule->match_tag = pf_tagname2tag( 1828 newrule->match_tagname)) == 0) 1829 error = EBUSY; 1830 if (newrule->rt && !newrule->direction) 1831 error = EINVAL; 1832#ifdef __FreeBSD__ 1833#if NPFLOG > 0 1834 if (!newrule->log) 1835 newrule->logif = 0; 1836 if (newrule->logif >= PFLOGIFS_MAX) 1837 error = EINVAL; 1838#endif 1839#endif 1840 if (pf_rtlabel_add(&newrule->src.addr) || 1841 
pf_rtlabel_add(&newrule->dst.addr)) 1842 error = EBUSY; 1843 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1844 error = EINVAL; 1845 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1846 error = EINVAL; 1847 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1848 error = EINVAL; 1849 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1850 error = EINVAL; 1851 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1852 error = EINVAL; 1853 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1854 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1855 error = EINVAL; 1856 1857 if (newrule->overload_tblname[0]) { 1858 if ((newrule->overload_tbl = pfr_attach_table( 1859 ruleset, newrule->overload_tblname)) == 1860 NULL) 1861 error = EINVAL; 1862 else 1863 newrule->overload_tbl->pfrkt_flags |= 1864 PFR_TFLAG_ACTIVE; 1865 } 1866 1867 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1868 if (((((newrule->action == PF_NAT) || 1869 (newrule->action == PF_RDR) || 1870 (newrule->action == PF_BINAT) || 1871 (newrule->rt > PF_FASTROUTE)) && 1872 !newrule->anchor)) && 1873 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1874 error = EINVAL; 1875 1876 if (error) { 1877 pf_rm_rule(NULL, newrule); 1878 break; 1879 } 1880 1881#ifdef __FreeBSD__ 1882 if (!debug_pfugidhack && (newrule->uid.op || 1883 newrule->gid.op || 1884 newrule->log & PF_LOG_SOCKET_LOOKUP)) { 1885 DPFPRINTF(PF_DEBUG_MISC, 1886 ("pf: debug.pfugidhack enabled\n")); 1887 debug_pfugidhack = 1; 1888 } 1889#endif 1890 1891 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1892 newrule->evaluations = 0; 1893 newrule->packets[0] = newrule->packets[1] = 0; 1894 newrule->bytes[0] = newrule->bytes[1] = 0; 1895 } 1896 pf_empty_pool(&pf_pabuf); 1897 1898 if (pcr->action == PF_CHANGE_ADD_HEAD) 1899 oldrule = TAILQ_FIRST( 1900 ruleset->rules[rs_num].active.ptr); 1901 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1902 oldrule = TAILQ_LAST( 1903 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1904 else { 1905 oldrule = TAILQ_FIRST( 
1906 ruleset->rules[rs_num].active.ptr); 1907 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1908 oldrule = TAILQ_NEXT(oldrule, entries); 1909 if (oldrule == NULL) { 1910 if (newrule != NULL) 1911 pf_rm_rule(NULL, newrule); 1912 error = EINVAL; 1913 break; 1914 } 1915 } 1916 1917 if (pcr->action == PF_CHANGE_REMOVE) { 1918 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1919 ruleset->rules[rs_num].active.rcount--; 1920 } else { 1921 if (oldrule == NULL) 1922 TAILQ_INSERT_TAIL( 1923 ruleset->rules[rs_num].active.ptr, 1924 newrule, entries); 1925 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1926 pcr->action == PF_CHANGE_ADD_BEFORE) 1927 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1928 else 1929 TAILQ_INSERT_AFTER( 1930 ruleset->rules[rs_num].active.ptr, 1931 oldrule, newrule, entries); 1932 ruleset->rules[rs_num].active.rcount++; 1933 } 1934 1935 nr = 0; 1936 TAILQ_FOREACH(oldrule, 1937 ruleset->rules[rs_num].active.ptr, entries) 1938 oldrule->nr = nr++; 1939 1940 ruleset->rules[rs_num].active.ticket++; 1941 1942 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1943 pf_remove_if_empty_ruleset(ruleset); 1944 1945 break; 1946 } 1947 1948 case DIOCCLRSTATES: { 1949 struct pf_state *state, *nexts; 1950 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1951 int killed = 0; 1952 1953 for (state = RB_MIN(pf_state_tree_id, &tree_id); state; 1954 state = nexts) { 1955 nexts = RB_NEXT(pf_state_tree_id, &tree_id, state); 1956 1957 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1958 state->u.s.kif->pfik_name)) { 1959#if NPFSYNC 1960 /* don't send out individual delete messages */ 1961 state->sync_flags = PFSTATE_NOSYNC; 1962#endif 1963 pf_unlink_state(state); 1964 killed++; 1965 } 1966 } 1967 psk->psk_af = killed; 1968#if NPFSYNC 1969 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1970#endif 1971 break; 1972 } 1973 1974 case DIOCKILLSTATES: { 1975 struct pf_state *state, *nexts; 1976 struct pf_state_host *src, *dst; 1977 
struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1978 int killed = 0; 1979 1980 for (state = RB_MIN(pf_state_tree_id, &tree_id); state; 1981 state = nexts) { 1982 nexts = RB_NEXT(pf_state_tree_id, &tree_id, state); 1983 1984 if (state->direction == PF_OUT) { 1985 src = &state->lan; 1986 dst = &state->ext; 1987 } else { 1988 src = &state->ext; 1989 dst = &state->lan; 1990 } 1991 if ((!psk->psk_af || state->af == psk->psk_af) 1992 && (!psk->psk_proto || psk->psk_proto == 1993 state->proto) && 1994 PF_MATCHA(psk->psk_src.neg, 1995 &psk->psk_src.addr.v.a.addr, 1996 &psk->psk_src.addr.v.a.mask, 1997 &src->addr, state->af) && 1998 PF_MATCHA(psk->psk_dst.neg, 1999 &psk->psk_dst.addr.v.a.addr, 2000 &psk->psk_dst.addr.v.a.mask, 2001 &dst->addr, state->af) && 2002 (psk->psk_src.port_op == 0 || 2003 pf_match_port(psk->psk_src.port_op, 2004 psk->psk_src.port[0], psk->psk_src.port[1], 2005 src->port)) && 2006 (psk->psk_dst.port_op == 0 || 2007 pf_match_port(psk->psk_dst.port_op, 2008 psk->psk_dst.port[0], psk->psk_dst.port[1], 2009 dst->port)) && 2010 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 2011 state->u.s.kif->pfik_name))) { 2012#if NPFSYNC > 0 2013 /* send immediate delete of state */ 2014 pfsync_delete_state(state); 2015 state->sync_flags |= PFSTATE_NOSYNC; 2016#endif 2017 pf_unlink_state(state); 2018 killed++; 2019 } 2020 } 2021 psk->psk_af = killed; 2022 break; 2023 } 2024 2025 case DIOCADDSTATE: { 2026 struct pfioc_state *ps = (struct pfioc_state *)addr; 2027 struct pf_state *state; 2028 struct pfi_kif *kif; 2029 2030 if (ps->state.timeout >= PFTM_MAX && 2031 ps->state.timeout != PFTM_UNTIL_PACKET) { 2032 error = EINVAL; 2033 break; 2034 } 2035 state = pool_get(&pf_state_pl, PR_NOWAIT); 2036 if (state == NULL) { 2037 error = ENOMEM; 2038 break; 2039 } 2040 kif = pfi_kif_get(ps->state.u.ifname); 2041 if (kif == NULL) { 2042 pool_put(&pf_state_pl, state); 2043 error = ENOENT; 2044 break; 2045 } 2046 bcopy(&ps->state, state, sizeof(struct pf_state)); 
2047 bzero(&state->u, sizeof(state->u)); 2048 state->rule.ptr = &pf_default_rule; 2049 state->nat_rule.ptr = NULL; 2050 state->anchor.ptr = NULL; 2051 state->rt_kif = NULL; 2052 state->creation = time_second; 2053 state->pfsync_time = 0; 2054 state->packets[0] = state->packets[1] = 0; 2055 state->bytes[0] = state->bytes[1] = 0; 2056 2057 if (pf_insert_state(kif, state)) { 2058 pfi_kif_unref(kif, PFI_KIF_REF_NONE); 2059 pool_put(&pf_state_pl, state); 2060 error = ENOMEM; 2061 } 2062 break; 2063 } 2064 2065 case DIOCGETSTATE: { 2066 struct pfioc_state *ps = (struct pfioc_state *)addr; 2067 struct pf_state *state; 2068 u_int32_t nr; 2069 int secs; 2070 2071 nr = 0; 2072 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2073 if (nr >= ps->nr) 2074 break; 2075 nr++; 2076 } 2077 if (state == NULL) { 2078 error = EBUSY; 2079 break; 2080 } 2081 secs = time_second; 2082 bcopy(state, &ps->state, sizeof(ps->state)); 2083 strlcpy(ps->state.u.ifname, state->u.s.kif->pfik_name, 2084 sizeof(ps->state.u.ifname)); 2085 ps->state.rule.nr = state->rule.ptr->nr; 2086 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 2087 -1 : state->nat_rule.ptr->nr; 2088 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
2089 -1 : state->anchor.ptr->nr; 2090 ps->state.creation = secs - ps->state.creation; 2091 ps->state.expire = pf_state_expires(state); 2092 if (ps->state.expire > secs) 2093 ps->state.expire -= secs; 2094 else 2095 ps->state.expire = 0; 2096 break; 2097 } 2098 2099 case DIOCGETSTATES: { 2100 struct pfioc_states *ps = (struct pfioc_states *)addr; 2101 struct pf_state *state; 2102 struct pf_state *p, *pstore; 2103 u_int32_t nr = 0; 2104 int space = ps->ps_len; 2105 2106 if (space == 0) { 2107 nr = pf_status.states; 2108 ps->ps_len = sizeof(struct pf_state) * nr; 2109 break; 2110 } 2111 2112#ifdef __FreeBSD__ 2113 PF_UNLOCK(); 2114#endif 2115 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2116#ifdef __FreeBSD__ 2117 PF_LOCK(); 2118#endif 2119 2120 p = ps->ps_states; 2121 2122 state = TAILQ_FIRST(&state_list); 2123 while (state) { 2124 if (state->timeout != PFTM_UNLINKED) { 2125 int secs = time_second; 2126 2127 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2128 break; 2129 2130 bcopy(state, pstore, sizeof(*pstore)); 2131 strlcpy(pstore->u.ifname, 2132 state->u.s.kif->pfik_name, 2133 sizeof(pstore->u.ifname)); 2134 pstore->rule.nr = state->rule.ptr->nr; 2135 pstore->nat_rule.nr = (state->nat_rule.ptr == 2136 NULL) ? -1 : state->nat_rule.ptr->nr; 2137 pstore->anchor.nr = (state->anchor.ptr == 2138 NULL) ? 
-1 : state->anchor.ptr->nr; 2139 pstore->creation = secs - pstore->creation; 2140 pstore->expire = pf_state_expires(state); 2141 if (pstore->expire > secs) 2142 pstore->expire -= secs; 2143 else 2144 pstore->expire = 0; 2145#ifdef __FreeBSD__ 2146 PF_COPYOUT(pstore, p, sizeof(*p), error); 2147#else 2148 error = copyout(pstore, p, sizeof(*p)); 2149#endif 2150 if (error) { 2151 free(pstore, M_TEMP); 2152 goto fail; 2153 } 2154 p++; 2155 nr++; 2156 } 2157 state = TAILQ_NEXT(state, u.s.entry_list); 2158 } 2159 2160 ps->ps_len = sizeof(struct pf_state) * nr; 2161 2162 free(pstore, M_TEMP); 2163 break; 2164 } 2165 2166 case DIOCGETSTATUS: { 2167 struct pf_status *s = (struct pf_status *)addr; 2168 bcopy(&pf_status, s, sizeof(struct pf_status)); 2169 pfi_fill_oldstatus(s); 2170 break; 2171 } 2172 2173 case DIOCSETSTATUSIF: { 2174 struct pfioc_if *pi = (struct pfioc_if *)addr; 2175 2176 if (pi->ifname[0] == 0) { 2177 bzero(pf_status.ifname, IFNAMSIZ); 2178 break; 2179 } 2180 if (ifunit(pi->ifname) == NULL) { 2181 error = EINVAL; 2182 break; 2183 } 2184 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2185 break; 2186 } 2187 2188 case DIOCCLRSTATUS: { 2189 bzero(pf_status.counters, sizeof(pf_status.counters)); 2190 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2191 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2192 pf_status.since = time_second; 2193 if (*pf_status.ifname) 2194 pfi_clr_istats(pf_status.ifname); 2195 break; 2196 } 2197 2198 case DIOCNATLOOK: { 2199 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2200 struct pf_state *state; 2201 struct pf_state_cmp key; 2202 int m = 0, direction = pnl->direction; 2203 2204 key.af = pnl->af; 2205 key.proto = pnl->proto; 2206 2207 if (!pnl->proto || 2208 PF_AZERO(&pnl->saddr, pnl->af) || 2209 PF_AZERO(&pnl->daddr, pnl->af) || 2210 ((pnl->proto == IPPROTO_TCP || 2211 pnl->proto == IPPROTO_UDP) && 2212 (!pnl->dport || !pnl->sport))) 2213 error = EINVAL; 2214 else { 2215 /* 2216 * userland gives us 
source and dest of connection, 2217 * reverse the lookup so we ask for what happens with 2218 * the return traffic, enabling us to find it in the 2219 * state tree. 2220 */ 2221 if (direction == PF_IN) { 2222 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2223 key.ext.port = pnl->dport; 2224 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2225 key.gwy.port = pnl->sport; 2226 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2227 } else { 2228 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2229 key.lan.port = pnl->dport; 2230 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2231 key.ext.port = pnl->sport; 2232 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2233 } 2234 if (m > 1) 2235 error = E2BIG; /* more than one state */ 2236 else if (state != NULL) { 2237 if (direction == PF_IN) { 2238 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 2239 state->af); 2240 pnl->rsport = state->lan.port; 2241 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2242 pnl->af); 2243 pnl->rdport = pnl->dport; 2244 } else { 2245 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 2246 state->af); 2247 pnl->rdport = state->gwy.port; 2248 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2249 pnl->af); 2250 pnl->rsport = pnl->sport; 2251 } 2252 } else 2253 error = ENOENT; 2254 } 2255 break; 2256 } 2257 2258 case DIOCSETTIMEOUT: { 2259 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2260 int old; 2261 2262 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2263 pt->seconds < 0) { 2264 error = EINVAL; 2265 goto fail; 2266 } 2267 old = pf_default_rule.timeout[pt->timeout]; 2268 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2269 pt->seconds = 1; 2270 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2271 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2272 wakeup(pf_purge_thread); 2273 pt->seconds = old; 2274 break; 2275 } 2276 2277 case DIOCGETTIMEOUT: { 2278 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2279 2280 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2281 error = EINVAL; 2282 goto fail; 2283 } 2284 pt->seconds = 
pf_default_rule.timeout[pt->timeout]; 2285 break; 2286 } 2287 2288 case DIOCGETLIMIT: { 2289 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2290 2291 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2292 error = EINVAL; 2293 goto fail; 2294 } 2295 pl->limit = pf_pool_limits[pl->index].limit; 2296 break; 2297 } 2298 2299 case DIOCSETLIMIT: { 2300 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2301 int old_limit; 2302 2303 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2304 pf_pool_limits[pl->index].pp == NULL) { 2305 error = EINVAL; 2306 goto fail; 2307 } 2308#ifdef __FreeBSD__ 2309 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 2310#else 2311 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2312 pl->limit, NULL, 0) != 0) { 2313 error = EBUSY; 2314 goto fail; 2315 } 2316#endif 2317 old_limit = pf_pool_limits[pl->index].limit; 2318 pf_pool_limits[pl->index].limit = pl->limit; 2319 pl->limit = old_limit; 2320 break; 2321 } 2322 2323 case DIOCSETDEBUG: { 2324 u_int32_t *level = (u_int32_t *)addr; 2325 2326 pf_status.debug = *level; 2327 break; 2328 } 2329 2330 case DIOCCLRRULECTRS: { 2331 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2332 struct pf_ruleset *ruleset = &pf_main_ruleset; 2333 struct pf_rule *rule; 2334 2335 TAILQ_FOREACH(rule, 2336 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2337 rule->evaluations = 0; 2338 rule->packets[0] = rule->packets[1] = 0; 2339 rule->bytes[0] = rule->bytes[1] = 0; 2340 } 2341 break; 2342 } 2343 2344#ifdef __FreeBSD__ 2345 case DIOCGIFSPEED: { 2346 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2347 struct pf_ifspeed ps; 2348 struct ifnet *ifp; 2349 2350 if (psp->ifname[0] != 0) { 2351 /* Can we completely trust user-land? 
*/ 2352 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2353 ifp = ifunit(ps.ifname); 2354 if (ifp != NULL) 2355 psp->baudrate = ifp->if_baudrate; 2356 else 2357 error = EINVAL; 2358 } else 2359 error = EINVAL; 2360 break; 2361 } 2362#endif /* __FreeBSD__ */ 2363 2364#ifdef ALTQ 2365 case DIOCSTARTALTQ: { 2366 struct pf_altq *altq; 2367 2368 /* enable all altq interfaces on active list */ 2369 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2370#ifdef __FreeBSD__ 2371 if (altq->qname[0] == 0 && (altq->local_flags & 2372 PFALTQ_FLAG_IF_REMOVED) == 0) { 2373#else 2374 if (altq->qname[0] == 0) { 2375#endif 2376 error = pf_enable_altq(altq); 2377 if (error != 0) 2378 break; 2379 } 2380 } 2381 if (error == 0) 2382 pf_altq_running = 1; 2383 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2384 break; 2385 } 2386 2387 case DIOCSTOPALTQ: { 2388 struct pf_altq *altq; 2389 2390 /* disable all altq interfaces on active list */ 2391 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2392#ifdef __FreeBSD__ 2393 if (altq->qname[0] == 0 && (altq->local_flags & 2394 PFALTQ_FLAG_IF_REMOVED) == 0) { 2395#else 2396 if (altq->qname[0] == 0) { 2397#endif 2398 error = pf_disable_altq(altq); 2399 if (error != 0) 2400 break; 2401 } 2402 } 2403 if (error == 0) 2404 pf_altq_running = 0; 2405 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2406 break; 2407 } 2408 2409 case DIOCADDALTQ: { 2410 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2411 struct pf_altq *altq, *a; 2412 2413 if (pa->ticket != ticket_altqs_inactive) { 2414 error = EBUSY; 2415 break; 2416 } 2417 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2418 if (altq == NULL) { 2419 error = ENOMEM; 2420 break; 2421 } 2422 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2423#ifdef __FreeBSD__ 2424 altq->local_flags = 0; 2425#endif 2426 2427 /* 2428 * if this is for a queue, find the discipline and 2429 * copy the necessary fields 2430 */ 2431 if (altq->qname[0] != 0) { 2432 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2433 error = EBUSY; 
2434 pool_put(&pf_altq_pl, altq); 2435 break; 2436 } 2437 altq->altq_disc = NULL; 2438 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2439 if (strncmp(a->ifname, altq->ifname, 2440 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2441 altq->altq_disc = a->altq_disc; 2442 break; 2443 } 2444 } 2445 } 2446 2447#ifdef __FreeBSD__ 2448 struct ifnet *ifp; 2449 2450 if ((ifp = ifunit(altq->ifname)) == NULL) { 2451 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 2452 } else { 2453 PF_UNLOCK(); 2454#endif 2455 error = altq_add(altq); 2456#ifdef __FreeBSD__ 2457 PF_LOCK(); 2458 } 2459#endif 2460 if (error) { 2461 pool_put(&pf_altq_pl, altq); 2462 break; 2463 } 2464 2465 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2466 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2467 break; 2468 } 2469 2470 case DIOCGETALTQS: { 2471 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2472 struct pf_altq *altq; 2473 2474 pa->nr = 0; 2475 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2476 pa->nr++; 2477 pa->ticket = ticket_altqs_active; 2478 break; 2479 } 2480 2481 case DIOCGETALTQ: { 2482 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2483 struct pf_altq *altq; 2484 u_int32_t nr; 2485 2486 if (pa->ticket != ticket_altqs_active) { 2487 error = EBUSY; 2488 break; 2489 } 2490 nr = 0; 2491 altq = TAILQ_FIRST(pf_altqs_active); 2492 while ((altq != NULL) && (nr < pa->nr)) { 2493 altq = TAILQ_NEXT(altq, entries); 2494 nr++; 2495 } 2496 if (altq == NULL) { 2497 error = EBUSY; 2498 break; 2499 } 2500 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2501 break; 2502 } 2503 2504 case DIOCCHANGEALTQ: 2505 /* CHANGEALTQ not supported yet! 
*/ 2506 error = ENODEV; 2507 break; 2508 2509 case DIOCGETQSTATS: { 2510 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2511 struct pf_altq *altq; 2512 u_int32_t nr; 2513 int nbytes; 2514 2515 if (pq->ticket != ticket_altqs_active) { 2516 error = EBUSY; 2517 break; 2518 } 2519 nbytes = pq->nbytes; 2520 nr = 0; 2521 altq = TAILQ_FIRST(pf_altqs_active); 2522 while ((altq != NULL) && (nr < pq->nr)) { 2523 altq = TAILQ_NEXT(altq, entries); 2524 nr++; 2525 } 2526 if (altq == NULL) { 2527 error = EBUSY; 2528 break; 2529 } 2530#ifdef __FreeBSD__ 2531 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2532 error = ENXIO; 2533 break; 2534 } 2535 PF_UNLOCK(); 2536#endif 2537 error = altq_getqstats(altq, pq->buf, &nbytes); 2538#ifdef __FreeBSD__ 2539 PF_LOCK(); 2540#endif 2541 if (error == 0) { 2542 pq->scheduler = altq->scheduler; 2543 pq->nbytes = nbytes; 2544 } 2545 break; 2546 } 2547#endif /* ALTQ */ 2548 2549 case DIOCBEGINADDRS: { 2550 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2551 2552 pf_empty_pool(&pf_pabuf); 2553 pp->ticket = ++ticket_pabuf; 2554 break; 2555 } 2556 2557 case DIOCADDADDR: { 2558 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2559 2560 if (pp->ticket != ticket_pabuf) { 2561 error = EBUSY; 2562 break; 2563 } 2564#ifndef INET 2565 if (pp->af == AF_INET) { 2566 error = EAFNOSUPPORT; 2567 break; 2568 } 2569#endif /* INET */ 2570#ifndef INET6 2571 if (pp->af == AF_INET6) { 2572 error = EAFNOSUPPORT; 2573 break; 2574 } 2575#endif /* INET6 */ 2576 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2577 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2578 pp->addr.addr.type != PF_ADDR_TABLE) { 2579 error = EINVAL; 2580 break; 2581 } 2582 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2583 if (pa == NULL) { 2584 error = ENOMEM; 2585 break; 2586 } 2587 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2588 if (pa->ifname[0]) { 2589 pa->kif = pfi_kif_get(pa->ifname); 2590 if (pa->kif == NULL) { 2591 pool_put(&pf_pooladdr_pl, pa); 2592 
error = EINVAL; 2593 break; 2594 } 2595 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2596 } 2597 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2598 pfi_dynaddr_remove(&pa->addr); 2599 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2600 pool_put(&pf_pooladdr_pl, pa); 2601 error = EINVAL; 2602 break; 2603 } 2604 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2605 break; 2606 } 2607 2608 case DIOCGETADDRS: { 2609 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2610 2611 pp->nr = 0; 2612 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2613 pp->r_num, 0, 1, 0); 2614 if (pool == NULL) { 2615 error = EBUSY; 2616 break; 2617 } 2618 TAILQ_FOREACH(pa, &pool->list, entries) 2619 pp->nr++; 2620 break; 2621 } 2622 2623 case DIOCGETADDR: { 2624 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2625 u_int32_t nr = 0; 2626 2627 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2628 pp->r_num, 0, 1, 1); 2629 if (pool == NULL) { 2630 error = EBUSY; 2631 break; 2632 } 2633 pa = TAILQ_FIRST(&pool->list); 2634 while ((pa != NULL) && (nr < pp->nr)) { 2635 pa = TAILQ_NEXT(pa, entries); 2636 nr++; 2637 } 2638 if (pa == NULL) { 2639 error = EBUSY; 2640 break; 2641 } 2642 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2643 pfi_dynaddr_copyout(&pp->addr.addr); 2644 pf_tbladdr_copyout(&pp->addr.addr); 2645 pf_rtlabel_copyout(&pp->addr.addr); 2646 break; 2647 } 2648 2649 case DIOCCHANGEADDR: { 2650 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2651 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2652 struct pf_ruleset *ruleset; 2653 2654 if (pca->action < PF_CHANGE_ADD_HEAD || 2655 pca->action > PF_CHANGE_REMOVE) { 2656 error = EINVAL; 2657 break; 2658 } 2659 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2660 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2661 pca->addr.addr.type != PF_ADDR_TABLE) { 2662 error = EINVAL; 2663 break; 2664 } 2665 2666 ruleset = pf_find_ruleset(pca->anchor); 2667 if (ruleset == NULL) { 2668 error = EBUSY; 2669 break; 2670 } 2671 
pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2672 pca->r_num, pca->r_last, 1, 1); 2673 if (pool == NULL) { 2674 error = EBUSY; 2675 break; 2676 } 2677 if (pca->action != PF_CHANGE_REMOVE) { 2678 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2679 if (newpa == NULL) { 2680 error = ENOMEM; 2681 break; 2682 } 2683 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2684#ifndef INET 2685 if (pca->af == AF_INET) { 2686 pool_put(&pf_pooladdr_pl, newpa); 2687 error = EAFNOSUPPORT; 2688 break; 2689 } 2690#endif /* INET */ 2691#ifndef INET6 2692 if (pca->af == AF_INET6) { 2693 pool_put(&pf_pooladdr_pl, newpa); 2694 error = EAFNOSUPPORT; 2695 break; 2696 } 2697#endif /* INET6 */ 2698 if (newpa->ifname[0]) { 2699 newpa->kif = pfi_kif_get(newpa->ifname); 2700 if (newpa->kif == NULL) { 2701 pool_put(&pf_pooladdr_pl, newpa); 2702 error = EINVAL; 2703 break; 2704 } 2705 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2706 } else 2707 newpa->kif = NULL; 2708 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2709 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2710 pfi_dynaddr_remove(&newpa->addr); 2711 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2712 pool_put(&pf_pooladdr_pl, newpa); 2713 error = EINVAL; 2714 break; 2715 } 2716 } 2717 2718 if (pca->action == PF_CHANGE_ADD_HEAD) 2719 oldpa = TAILQ_FIRST(&pool->list); 2720 else if (pca->action == PF_CHANGE_ADD_TAIL) 2721 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2722 else { 2723 int i = 0; 2724 2725 oldpa = TAILQ_FIRST(&pool->list); 2726 while ((oldpa != NULL) && (i < pca->nr)) { 2727 oldpa = TAILQ_NEXT(oldpa, entries); 2728 i++; 2729 } 2730 if (oldpa == NULL) { 2731 error = EINVAL; 2732 break; 2733 } 2734 } 2735 2736 if (pca->action == PF_CHANGE_REMOVE) { 2737 TAILQ_REMOVE(&pool->list, oldpa, entries); 2738 pfi_dynaddr_remove(&oldpa->addr); 2739 pf_tbladdr_remove(&oldpa->addr); 2740 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2741 pool_put(&pf_pooladdr_pl, oldpa); 2742 } else { 2743 if (oldpa == NULL) 2744 
TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2745 else if (pca->action == PF_CHANGE_ADD_HEAD || 2746 pca->action == PF_CHANGE_ADD_BEFORE) 2747 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2748 else 2749 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2750 newpa, entries); 2751 } 2752 2753 pool->cur = TAILQ_FIRST(&pool->list); 2754 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2755 pca->af); 2756 break; 2757 } 2758 2759 case DIOCGETRULESETS: { 2760 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2761 struct pf_ruleset *ruleset; 2762 struct pf_anchor *anchor; 2763 2764 pr->path[sizeof(pr->path) - 1] = 0; 2765 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2766 error = EINVAL; 2767 break; 2768 } 2769 pr->nr = 0; 2770 if (ruleset->anchor == NULL) { 2771 /* XXX kludge for pf_main_ruleset */ 2772 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2773 if (anchor->parent == NULL) 2774 pr->nr++; 2775 } else { 2776 RB_FOREACH(anchor, pf_anchor_node, 2777 &ruleset->anchor->children) 2778 pr->nr++; 2779 } 2780 break; 2781 } 2782 2783 case DIOCGETRULESET: { 2784 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2785 struct pf_ruleset *ruleset; 2786 struct pf_anchor *anchor; 2787 u_int32_t nr = 0; 2788 2789 pr->path[sizeof(pr->path) - 1] = 0; 2790 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2791 error = EINVAL; 2792 break; 2793 } 2794 pr->name[0] = 0; 2795 if (ruleset->anchor == NULL) { 2796 /* XXX kludge for pf_main_ruleset */ 2797 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2798 if (anchor->parent == NULL && nr++ == pr->nr) { 2799 strlcpy(pr->name, anchor->name, 2800 sizeof(pr->name)); 2801 break; 2802 } 2803 } else { 2804 RB_FOREACH(anchor, pf_anchor_node, 2805 &ruleset->anchor->children) 2806 if (nr++ == pr->nr) { 2807 strlcpy(pr->name, anchor->name, 2808 sizeof(pr->name)); 2809 break; 2810 } 2811 } 2812 if (!pr->name[0]) 2813 error = EBUSY; 2814 break; 2815 } 2816 2817 case DIOCRCLRTABLES: { 2818 struct pfioc_table *io = (struct 
pfioc_table *)addr; 2819 2820 if (io->pfrio_esize != 0) { 2821 error = ENODEV; 2822 break; 2823 } 2824 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2825 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2826 break; 2827 } 2828 2829 case DIOCRADDTABLES: { 2830 struct pfioc_table *io = (struct pfioc_table *)addr; 2831 2832 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2833 error = ENODEV; 2834 break; 2835 } 2836 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2837 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2838 break; 2839 } 2840 2841 case DIOCRDELTABLES: { 2842 struct pfioc_table *io = (struct pfioc_table *)addr; 2843 2844 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2845 error = ENODEV; 2846 break; 2847 } 2848 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2849 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2850 break; 2851 } 2852 2853 case DIOCRGETTABLES: { 2854 struct pfioc_table *io = (struct pfioc_table *)addr; 2855 2856 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2857 error = ENODEV; 2858 break; 2859 } 2860 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2861 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2862 break; 2863 } 2864 2865 case DIOCRGETTSTATS: { 2866 struct pfioc_table *io = (struct pfioc_table *)addr; 2867 2868 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2869 error = ENODEV; 2870 break; 2871 } 2872 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2873 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2874 break; 2875 } 2876 2877 case DIOCRCLRTSTATS: { 2878 struct pfioc_table *io = (struct pfioc_table *)addr; 2879 2880 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2881 error = ENODEV; 2882 break; 2883 } 2884 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2885 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2886 break; 2887 } 2888 2889 case DIOCRSETTFLAGS: { 2890 struct pfioc_table *io = (struct pfioc_table *)addr; 2891 
2892 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2893 error = ENODEV; 2894 break; 2895 } 2896 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2897 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2898 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2899 break; 2900 } 2901 2902 case DIOCRCLRADDRS: { 2903 struct pfioc_table *io = (struct pfioc_table *)addr; 2904 2905 if (io->pfrio_esize != 0) { 2906 error = ENODEV; 2907 break; 2908 } 2909 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2910 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2911 break; 2912 } 2913 2914 case DIOCRADDADDRS: { 2915 struct pfioc_table *io = (struct pfioc_table *)addr; 2916 2917 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2918 error = ENODEV; 2919 break; 2920 } 2921 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2922 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2923 PFR_FLAG_USERIOCTL); 2924 break; 2925 } 2926 2927 case DIOCRDELADDRS: { 2928 struct pfioc_table *io = (struct pfioc_table *)addr; 2929 2930 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2931 error = ENODEV; 2932 break; 2933 } 2934 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2935 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2936 PFR_FLAG_USERIOCTL); 2937 break; 2938 } 2939 2940 case DIOCRSETADDRS: { 2941 struct pfioc_table *io = (struct pfioc_table *)addr; 2942 2943 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2944 error = ENODEV; 2945 break; 2946 } 2947 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2948 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2949 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2950 PFR_FLAG_USERIOCTL, 0); 2951 break; 2952 } 2953 2954 case DIOCRGETADDRS: { 2955 struct pfioc_table *io = (struct pfioc_table *)addr; 2956 2957 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2958 error = ENODEV; 2959 break; 2960 } 2961 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2962 &io->pfrio_size, 
io->pfrio_flags | PFR_FLAG_USERIOCTL); 2963 break; 2964 } 2965 2966 case DIOCRGETASTATS: { 2967 struct pfioc_table *io = (struct pfioc_table *)addr; 2968 2969 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2970 error = ENODEV; 2971 break; 2972 } 2973 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2974 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2975 break; 2976 } 2977 2978 case DIOCRCLRASTATS: { 2979 struct pfioc_table *io = (struct pfioc_table *)addr; 2980 2981 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2982 error = ENODEV; 2983 break; 2984 } 2985 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2986 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2987 PFR_FLAG_USERIOCTL); 2988 break; 2989 } 2990 2991 case DIOCRTSTADDRS: { 2992 struct pfioc_table *io = (struct pfioc_table *)addr; 2993 2994 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2995 error = ENODEV; 2996 break; 2997 } 2998 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2999 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 3000 PFR_FLAG_USERIOCTL); 3001 break; 3002 } 3003 3004 case DIOCRINADEFINE: { 3005 struct pfioc_table *io = (struct pfioc_table *)addr; 3006 3007 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3008 error = ENODEV; 3009 break; 3010 } 3011 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 3012 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 3013 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3014 break; 3015 } 3016 3017 case DIOCOSFPADD: { 3018 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3019 error = pf_osfp_add(io); 3020 break; 3021 } 3022 3023 case DIOCOSFPGET: { 3024 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3025 error = pf_osfp_get(io); 3026 break; 3027 } 3028 3029 case DIOCXBEGIN: { 3030 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3031 struct pfioc_trans_e *ioe; 3032 struct pfr_table *table; 3033 int i; 3034 3035 if (io->esize != sizeof(*ioe)) { 3036 error = 
ENODEV; 3037 goto fail; 3038 } 3039#ifdef __FreeBSD__ 3040 PF_UNLOCK(); 3041#endif 3042 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3043 M_TEMP, M_WAITOK); 3044 table = (struct pfr_table *)malloc(sizeof(*table), 3045 M_TEMP, M_WAITOK); 3046#ifdef __FreeBSD__ 3047 PF_LOCK(); 3048#endif 3049 for (i = 0; i < io->size; i++) { 3050#ifdef __FreeBSD__ 3051 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3052 if (error) { 3053#else 3054 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3055#endif 3056 free(table, M_TEMP); 3057 free(ioe, M_TEMP); 3058 error = EFAULT; 3059 goto fail; 3060 } 3061 switch (ioe->rs_num) { 3062#ifdef ALTQ 3063 case PF_RULESET_ALTQ: 3064 if (ioe->anchor[0]) { 3065 free(table, M_TEMP); 3066 free(ioe, M_TEMP); 3067 error = EINVAL; 3068 goto fail; 3069 } 3070 if ((error = pf_begin_altq(&ioe->ticket))) { 3071 free(table, M_TEMP); 3072 free(ioe, M_TEMP); 3073 goto fail; 3074 } 3075 break; 3076#endif /* ALTQ */ 3077 case PF_RULESET_TABLE: 3078 bzero(table, sizeof(*table)); 3079 strlcpy(table->pfrt_anchor, ioe->anchor, 3080 sizeof(table->pfrt_anchor)); 3081 if ((error = pfr_ina_begin(table, 3082 &ioe->ticket, NULL, 0))) { 3083 free(table, M_TEMP); 3084 free(ioe, M_TEMP); 3085 goto fail; 3086 } 3087 break; 3088 default: 3089 if ((error = pf_begin_rules(&ioe->ticket, 3090 ioe->rs_num, ioe->anchor))) { 3091 free(table, M_TEMP); 3092 free(ioe, M_TEMP); 3093 goto fail; 3094 } 3095 break; 3096 } 3097#ifdef __FreeBSD__ 3098 PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]), 3099 error); 3100 if (error) { 3101#else 3102 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 3103#endif 3104 free(table, M_TEMP); 3105 free(ioe, M_TEMP); 3106 error = EFAULT; 3107 goto fail; 3108 } 3109 } 3110 free(table, M_TEMP); 3111 free(ioe, M_TEMP); 3112 break; 3113 } 3114 3115 case DIOCXROLLBACK: { 3116 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3117 struct pfioc_trans_e *ioe; 3118 struct pfr_table *table; 3119 int i; 3120 3121 if (io->esize != sizeof(*ioe)) { 
3122 error = ENODEV; 3123 goto fail; 3124 } 3125#ifdef __FreeBSD__ 3126 PF_UNLOCK(); 3127#endif 3128 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3129 M_TEMP, M_WAITOK); 3130 table = (struct pfr_table *)malloc(sizeof(*table), 3131 M_TEMP, M_WAITOK); 3132#ifdef __FreeBSD__ 3133 PF_LOCK(); 3134#endif 3135 for (i = 0; i < io->size; i++) { 3136#ifdef __FreeBSD__ 3137 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3138 if (error) { 3139#else 3140 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3141#endif 3142 free(table, M_TEMP); 3143 free(ioe, M_TEMP); 3144 error = EFAULT; 3145 goto fail; 3146 } 3147 switch (ioe->rs_num) { 3148#ifdef ALTQ 3149 case PF_RULESET_ALTQ: 3150 if (ioe->anchor[0]) { 3151 free(table, M_TEMP); 3152 free(ioe, M_TEMP); 3153 error = EINVAL; 3154 goto fail; 3155 } 3156 if ((error = pf_rollback_altq(ioe->ticket))) { 3157 free(table, M_TEMP); 3158 free(ioe, M_TEMP); 3159 goto fail; /* really bad */ 3160 } 3161 break; 3162#endif /* ALTQ */ 3163 case PF_RULESET_TABLE: 3164 bzero(table, sizeof(*table)); 3165 strlcpy(table->pfrt_anchor, ioe->anchor, 3166 sizeof(table->pfrt_anchor)); 3167 if ((error = pfr_ina_rollback(table, 3168 ioe->ticket, NULL, 0))) { 3169 free(table, M_TEMP); 3170 free(ioe, M_TEMP); 3171 goto fail; /* really bad */ 3172 } 3173 break; 3174 default: 3175 if ((error = pf_rollback_rules(ioe->ticket, 3176 ioe->rs_num, ioe->anchor))) { 3177 free(table, M_TEMP); 3178 free(ioe, M_TEMP); 3179 goto fail; /* really bad */ 3180 } 3181 break; 3182 } 3183 } 3184 free(table, M_TEMP); 3185 free(ioe, M_TEMP); 3186 break; 3187 } 3188 3189 case DIOCXCOMMIT: { 3190 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3191 struct pfioc_trans_e *ioe; 3192 struct pfr_table *table; 3193 struct pf_ruleset *rs; 3194 int i; 3195 3196 if (io->esize != sizeof(*ioe)) { 3197 error = ENODEV; 3198 goto fail; 3199 } 3200#ifdef __FreeBSD__ 3201 PF_UNLOCK(); 3202#endif 3203 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 3204 M_TEMP, M_WAITOK); 3205 table = 
(struct pfr_table *)malloc(sizeof(*table), 3206 M_TEMP, M_WAITOK); 3207#ifdef __FreeBSD__ 3208 PF_LOCK(); 3209#endif 3210 /* first makes sure everything will succeed */ 3211 for (i = 0; i < io->size; i++) { 3212#ifdef __FreeBSD__ 3213 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3214 if (error) { 3215#else 3216 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3217#endif 3218 free(table, M_TEMP); 3219 free(ioe, M_TEMP); 3220 error = EFAULT; 3221 goto fail; 3222 } 3223 switch (ioe->rs_num) { 3224#ifdef ALTQ 3225 case PF_RULESET_ALTQ: 3226 if (ioe->anchor[0]) { 3227 free(table, M_TEMP); 3228 free(ioe, M_TEMP); 3229 error = EINVAL; 3230 goto fail; 3231 } 3232 if (!altqs_inactive_open || ioe->ticket != 3233 ticket_altqs_inactive) { 3234 free(table, M_TEMP); 3235 free(ioe, M_TEMP); 3236 error = EBUSY; 3237 goto fail; 3238 } 3239 break; 3240#endif /* ALTQ */ 3241 case PF_RULESET_TABLE: 3242 rs = pf_find_ruleset(ioe->anchor); 3243 if (rs == NULL || !rs->topen || ioe->ticket != 3244 rs->tticket) { 3245 free(table, M_TEMP); 3246 free(ioe, M_TEMP); 3247 error = EBUSY; 3248 goto fail; 3249 } 3250 break; 3251 default: 3252 if (ioe->rs_num < 0 || ioe->rs_num >= 3253 PF_RULESET_MAX) { 3254 free(table, M_TEMP); 3255 free(ioe, M_TEMP); 3256 error = EINVAL; 3257 goto fail; 3258 } 3259 rs = pf_find_ruleset(ioe->anchor); 3260 if (rs == NULL || 3261 !rs->rules[ioe->rs_num].inactive.open || 3262 rs->rules[ioe->rs_num].inactive.ticket != 3263 ioe->ticket) { 3264 free(table, M_TEMP); 3265 free(ioe, M_TEMP); 3266 error = EBUSY; 3267 goto fail; 3268 } 3269 break; 3270 } 3271 } 3272 /* now do the commit - no errors should happen here */ 3273 for (i = 0; i < io->size; i++) { 3274#ifdef __FreeBSD__ 3275 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3276 if (error) { 3277#else 3278 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3279#endif 3280 free(table, M_TEMP); 3281 free(ioe, M_TEMP); 3282 error = EFAULT; 3283 goto fail; 3284 } 3285 switch (ioe->rs_num) { 3286#ifdef ALTQ 3287 case 
PF_RULESET_ALTQ: 3288 if ((error = pf_commit_altq(ioe->ticket))) { 3289 free(table, M_TEMP); 3290 free(ioe, M_TEMP); 3291 goto fail; /* really bad */ 3292 } 3293 break; 3294#endif /* ALTQ */ 3295 case PF_RULESET_TABLE: 3296 bzero(table, sizeof(*table)); 3297 strlcpy(table->pfrt_anchor, ioe->anchor, 3298 sizeof(table->pfrt_anchor)); 3299 if ((error = pfr_ina_commit(table, ioe->ticket, 3300 NULL, NULL, 0))) { 3301 free(table, M_TEMP); 3302 free(ioe, M_TEMP); 3303 goto fail; /* really bad */ 3304 } 3305 break; 3306 default: 3307 if ((error = pf_commit_rules(ioe->ticket, 3308 ioe->rs_num, ioe->anchor))) { 3309 free(table, M_TEMP); 3310 free(ioe, M_TEMP); 3311 goto fail; /* really bad */ 3312 } 3313 break; 3314 } 3315 } 3316 free(table, M_TEMP); 3317 free(ioe, M_TEMP); 3318 break; 3319 } 3320 3321 case DIOCGETSRCNODES: { 3322 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3323 struct pf_src_node *n, *p, *pstore; 3324 u_int32_t nr = 0; 3325 int space = psn->psn_len; 3326 3327 if (space == 0) { 3328 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3329 nr++; 3330 psn->psn_len = sizeof(struct pf_src_node) * nr; 3331 break; 3332 } 3333 3334#ifdef __FreeBSD__ 3335 PF_UNLOCK(); 3336#endif 3337 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3338#ifdef __FreeBSD__ 3339 PF_LOCK(); 3340#endif 3341 3342 p = psn->psn_src_nodes; 3343 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3344 int secs = time_second, diff; 3345 3346 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3347 break; 3348 3349 bcopy(n, pstore, sizeof(*pstore)); 3350 if (n->rule.ptr != NULL) 3351 pstore->rule.nr = n->rule.ptr->nr; 3352 pstore->creation = secs - pstore->creation; 3353 if (pstore->expire > secs) 3354 pstore->expire -= secs; 3355 else 3356 pstore->expire = 0; 3357 3358 /* adjust the connection rate estimate */ 3359 diff = secs - n->conn_rate.last; 3360 if (diff >= n->conn_rate.seconds) 3361 pstore->conn_rate.count = 0; 3362 else 3363 pstore->conn_rate.count -= 3364 
n->conn_rate.count * diff / 3365 n->conn_rate.seconds; 3366 3367#ifdef __FreeBSD__ 3368 PF_COPYOUT(pstore, p, sizeof(*p), error); 3369#else 3370 error = copyout(pstore, p, sizeof(*p)); 3371#endif 3372 if (error) { 3373 free(pstore, M_TEMP); 3374 goto fail; 3375 } 3376 p++; 3377 nr++; 3378 } 3379 psn->psn_len = sizeof(struct pf_src_node) * nr; 3380 3381 free(pstore, M_TEMP); 3382 break; 3383 } 3384 3385 case DIOCCLRSRCNODES: { 3386 struct pf_src_node *n; 3387 struct pf_state *state; 3388 3389 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3390 state->src_node = NULL; 3391 state->nat_src_node = NULL; 3392 } 3393 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3394 n->expire = 1; 3395 n->states = 0; 3396 } 3397 pf_purge_expired_src_nodes(1); 3398 pf_status.src_nodes = 0; 3399 break; 3400 } 3401 3402 case DIOCKILLSRCNODES: { 3403 struct pf_src_node *sn; 3404 struct pf_state *s; 3405 struct pfioc_src_node_kill *psnk = \ 3406 (struct pfioc_src_node_kill *) addr; 3407 int killed = 0; 3408 3409 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 3410 if (PF_MATCHA(psnk->psnk_src.neg, \ 3411 &psnk->psnk_src.addr.v.a.addr, \ 3412 &psnk->psnk_src.addr.v.a.mask, \ 3413 &sn->addr, sn->af) && 3414 PF_MATCHA(psnk->psnk_dst.neg, \ 3415 &psnk->psnk_dst.addr.v.a.addr, \ 3416 &psnk->psnk_dst.addr.v.a.mask, \ 3417 &sn->raddr, sn->af)) { 3418 /* Handle state to src_node linkage */ 3419 if (sn->states != 0) { 3420 RB_FOREACH(s, pf_state_tree_id, 3421 &tree_id) { 3422 if (s->src_node == sn) 3423 s->src_node = NULL; 3424 if (s->nat_src_node == sn) 3425 s->nat_src_node = NULL; 3426 } 3427 sn->states = 0; 3428 } 3429 sn->expire = 1; 3430 killed++; 3431 } 3432 } 3433 3434 if (killed > 0) 3435 pf_purge_expired_src_nodes(1); 3436 3437 psnk->psnk_af = killed; 3438 break; 3439 } 3440 3441 case DIOCSETHOSTID: { 3442 u_int32_t *hostid = (u_int32_t *)addr; 3443 3444 if (*hostid == 0) 3445 pf_status.hostid = arc4random(); 3446 else 3447 pf_status.hostid = *hostid; 3448 break; 3449 } 3450 3451 case 
DIOCOSFPFLUSH:
		/* Flush the passive OS fingerprint table. */
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Userland must use the same element size this kernel was
		 * built with, otherwise refuse the request. */
		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	/* Common exit path: release the locks taken on entry. */
#ifdef __FreeBSD__
	PF_UNLOCK();

	if (flags & FWRITE)
		sx_xunlock(&pf_consistency_lock);
	else
		sx_sunlock(&pf_consistency_lock);
#else
	splx(s);
	/* XXX: Lock order? */
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
#endif
	return (error);
}

#ifdef __FreeBSD__
/*
 * XXX - Check for version mismatch!!!
 */
/*
 * Unlink every entry in the state tree.  The timeout is set to
 * PFTM_PURGE before unlinking; presumably this makes the purge logic
 * reap the state immediately -- confirm against pf state expiry code.
 */
static void
pf_clear_states(void)
{
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
		pf_unlink_state(state);
	}

#if 0 /* NPFSYNC */
/*
 * XXX This is called on module unload, we do not want to sync that over?
*/
 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Flush all tables, the in-kernel equivalent of DIOCRCLRTABLES with an
 * all-zero (wildcard) filter.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Detach all states from their source nodes and mark every source node
 * for expiry on the next purge run.
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;
		n->states = 0;
	}
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	pf_status.running = 0;
	do {
		/* Begin a transaction on each ruleset type; the commit
		 * below replaces the active rules with the (empty)
		 * inactive set, effectively flushing everything. */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback?
*/
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for IPv4.  Returns non-zero if the packet was
 * dropped, in which case *m has been freed and set to NULL.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul  9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in its network stack.  OpenBSD used to convert ip_len/ip_off to
	 * host byte order first, as FreeBSD does.  This is no longer true,
	 * so convert to network byte order around the pf_test() call.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv4; same byte-order handling as
 * pf_check_in(), plus finalizing any delayed checksum first.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul  9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in its network stack.  OpenBSD used to convert ip_len/ip_off to
	 * host byte order first, as FreeBSD does.  This is no longer true,
	 * so convert to network byte order around the pf_test() call.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for IPv6.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	INIT_VNET_NET(curvnet);

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses.
In order to support stateful 3720 * filtering we have change this to lo0 as it is the case in IPv4. 3721 */ 3722 chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? &V_loif[0] : ifp, m, 3723 NULL, inp); 3724 if (chk && *m) { 3725 m_freem(*m); 3726 *m = NULL; 3727 } 3728 return chk; 3729} 3730 3731static int 3732pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3733 struct inpcb *inp) 3734{ 3735 /* 3736 * IPv6 does not affected ip_len/ip_off byte order changes. 3737 */ 3738 int chk; 3739 3740 /* We need a proper CSUM befor we start (s. OpenBSD ip_output) */ 3741 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 3742 in_delayed_cksum(*m); 3743 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 3744 } 3745 chk = pf_test6(PF_OUT, ifp, m, NULL, inp); 3746 if (chk && *m) { 3747 m_freem(*m); 3748 *m = NULL; 3749 } 3750 return chk; 3751} 3752#endif /* INET6 */ 3753 3754static int 3755hook_pf(void) 3756{ 3757 struct pfil_head *pfh_inet; 3758#ifdef INET6 3759 struct pfil_head *pfh_inet6; 3760#endif 3761 3762 PF_ASSERT(MA_NOTOWNED); 3763 3764 if (pf_pfil_hooked) 3765 return (0); 3766 3767 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3768 if (pfh_inet == NULL) 3769 return (ESRCH); /* XXX */ 3770 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet); 3771 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet); 3772#ifdef INET6 3773 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3774 if (pfh_inet6 == NULL) { 3775 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3776 pfh_inet); 3777 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3778 pfh_inet); 3779 return (ESRCH); /* XXX */ 3780 } 3781 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6); 3782 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6); 3783#endif 3784 3785 pf_pfil_hooked = 1; 3786 return (0); 3787} 3788 3789static int 3790dehook_pf(void) 3791{ 3792 struct pfil_head *pfh_inet; 3793#ifdef INET6 3794 struct pfil_head 
*pfh_inet6; 3795#endif 3796 3797 PF_ASSERT(MA_NOTOWNED); 3798 3799 if (pf_pfil_hooked == 0) 3800 return (0); 3801 3802 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3803 if (pfh_inet == NULL) 3804 return (ESRCH); /* XXX */ 3805 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3806 pfh_inet); 3807 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3808 pfh_inet); 3809#ifdef INET6 3810 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3811 if (pfh_inet6 == NULL) 3812 return (ESRCH); /* XXX */ 3813 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, 3814 pfh_inet6); 3815 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, 3816 pfh_inet6); 3817#endif 3818 3819 pf_pfil_hooked = 0; 3820 return (0); 3821} 3822 3823static int 3824pf_load(void) 3825{ 3826 init_zone_var(); 3827 init_pf_mutex(); 3828 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); 3829 if (pfattach() < 0) { 3830 destroy_dev(pf_dev); 3831 destroy_pf_mutex(); 3832 return (ENOMEM); 3833 } 3834 return (0); 3835} 3836 3837static int 3838pf_unload(void) 3839{ 3840 int error = 0; 3841 3842 PF_LOCK(); 3843 pf_status.running = 0; 3844 PF_UNLOCK(); 3845 error = dehook_pf(); 3846 if (error) { 3847 /* 3848 * Should not happen! 3849 * XXX Due to error code ESRCH, kldunload will show 3850 * a message like 'No such process'. 
3851 */ 3852 printf("%s : pfil unregisteration fail\n", __FUNCTION__); 3853 return error; 3854 } 3855 PF_LOCK(); 3856 shutdown_pf(); 3857 pf_end_threads = 1; 3858 while (pf_end_threads < 2) { 3859 wakeup_one(pf_purge_thread); 3860 msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz); 3861 } 3862 pfi_cleanup(); 3863 pf_osfp_flush(); 3864 pf_osfp_cleanup(); 3865 cleanup_pf_zone(); 3866 PF_UNLOCK(); 3867 destroy_dev(pf_dev); 3868 destroy_pf_mutex(); 3869 return error; 3870} 3871 3872static int 3873pf_modevent(module_t mod, int type, void *data) 3874{ 3875 int error = 0; 3876 3877 switch(type) { 3878 case MOD_LOAD: 3879 error = pf_load(); 3880 break; 3881 3882 case MOD_UNLOAD: 3883 error = pf_unload(); 3884 break; 3885 default: 3886 error = EINVAL; 3887 break; 3888 } 3889 return error; 3890} 3891 3892static moduledata_t pf_mod = { 3893 "pf", 3894 pf_modevent, 3895 0 3896}; 3897 3898DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST); 3899MODULE_VERSION(pf, PF_MODVER); 3900#endif /* __FreeBSD__ */ 3901