/* pf_ioctl.c revision 147321 */
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 147321 2005-06-12 16:46:20Z mlaier $ */ 2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */ 3 4/* 5 * Copyright (c) 2001 Daniel Hartmeier 6 * Copyright (c) 2002,2003 Henning Brauer 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
36 * 37 */ 38 39#ifdef __FreeBSD__ 40#include "opt_inet.h" 41#include "opt_inet6.h" 42#endif 43 44#ifdef __FreeBSD__ 45#include "opt_bpf.h" 46#include "opt_pf.h" 47#define NBPFILTER DEV_BPF 48#define NPFLOG DEV_PFLOG 49#define NPFSYNC DEV_PFSYNC 50#else 51#include "bpfilter.h" 52#include "pflog.h" 53#include "pfsync.h" 54#endif 55 56#include <sys/param.h> 57#include <sys/systm.h> 58#include <sys/mbuf.h> 59#include <sys/filio.h> 60#include <sys/fcntl.h> 61#include <sys/socket.h> 62#include <sys/socketvar.h> 63#include <sys/kernel.h> 64#include <sys/time.h> 65#include <sys/malloc.h> 66#ifdef __FreeBSD__ 67#include <sys/module.h> 68#include <sys/conf.h> 69#include <sys/proc.h> 70#else 71#include <sys/timeout.h> 72#include <sys/pool.h> 73#endif 74 75#include <net/if.h> 76#include <net/if_types.h> 77#include <net/route.h> 78 79#include <netinet/in.h> 80#include <netinet/in_var.h> 81#include <netinet/in_systm.h> 82#include <netinet/ip.h> 83#include <netinet/ip_var.h> 84#include <netinet/ip_icmp.h> 85 86#ifndef __FreeBSD__ 87#include <dev/rndvar.h> 88#endif 89#include <net/pfvar.h> 90 91#if NPFSYNC > 0 92#include <net/if_pfsync.h> 93#endif /* NPFSYNC > 0 */ 94 95#ifdef INET6 96#include <netinet/ip6.h> 97#include <netinet/in_pcb.h> 98#endif /* INET6 */ 99 100#ifdef ALTQ 101#include <altq/altq.h> 102#endif 103 104#ifdef __FreeBSD__ 105#include <sys/limits.h> 106#include <sys/lock.h> 107#include <sys/mutex.h> 108#include <net/pfil.h> 109#endif /* __FreeBSD__ */ 110 111#ifdef __FreeBSD__ 112void init_zone_var(void); 113void cleanup_pf_zone(void); 114int pfattach(void); 115#else 116void pfattach(int); 117int pfopen(dev_t, int, int, struct proc *); 118int pfclose(dev_t, int, int, struct proc *); 119#endif 120struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 121 u_int8_t, u_int8_t, u_int8_t); 122int pf_get_ruleset_number(u_int8_t); 123void pf_init_ruleset(struct pf_ruleset *); 124int pf_anchor_setup(struct pf_rule *, 125 const struct pf_ruleset *, const char 
*); 126int pf_anchor_copyout(const struct pf_ruleset *, 127 const struct pf_rule *, struct pfioc_rule *); 128void pf_anchor_remove(struct pf_rule *); 129 130void pf_mv_pool(struct pf_palist *, struct pf_palist *); 131void pf_empty_pool(struct pf_palist *); 132#ifdef __FreeBSD__ 133int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 134#else 135int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 136#endif 137#ifdef ALTQ 138int pf_begin_altq(u_int32_t *); 139int pf_rollback_altq(u_int32_t); 140int pf_commit_altq(u_int32_t); 141int pf_enable_altq(struct pf_altq *); 142int pf_disable_altq(struct pf_altq *); 143#endif /* ALTQ */ 144int pf_begin_rules(u_int32_t *, int, const char *); 145int pf_rollback_rules(u_int32_t, int, char *); 146int pf_commit_rules(u_int32_t, int, char *); 147 148#ifdef __FreeBSD__ 149extern struct callout pf_expire_to; 150#else 151extern struct timeout pf_expire_to; 152#endif 153 154struct pf_rule pf_default_rule; 155#ifdef ALTQ 156static int pf_altq_running; 157#endif 158 159#define TAGID_MAX 50000 160TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 161 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 162 163#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 164#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 165#endif 166static u_int16_t tagname2tag(struct pf_tags *, char *); 167static void tag2tagname(struct pf_tags *, u_int16_t, char *); 168static void tag_unref(struct pf_tags *, u_int16_t); 169int pf_rtlabel_add(struct pf_addr_wrap *); 170void pf_rtlabel_remove(struct pf_addr_wrap *); 171void pf_rtlabel_copyout(struct pf_addr_wrap *); 172 173#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 174 175 176#ifdef __FreeBSD__ 177static struct cdev *pf_dev; 178 179/* 180 * XXX - These are new and need to be checked when moveing to a new version 181 */ 182static void pf_clear_states(void); 183static int pf_clear_tables(void); 184static void pf_clear_srcnodes(void); 185/* 186 * XXX - These are new 
and need to be checked when moveing to a new version 187 */ 188 189/* 190 * Wrapper functions for pfil(9) hooks 191 */ 192static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 193 int dir, struct inpcb *inp); 194static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 195 int dir, struct inpcb *inp); 196#ifdef INET6 197static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 198 int dir, struct inpcb *inp); 199static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, 200 int dir, struct inpcb *inp); 201#endif 202 203static int hook_pf(void); 204static int dehook_pf(void); 205static int shutdown_pf(void); 206static int pf_load(void); 207static int pf_unload(void); 208 209static struct cdevsw pf_cdevsw = { 210 .d_ioctl = pfioctl, 211 .d_name = PF_NAME, 212 .d_version = D_VERSION, 213}; 214 215static volatile int pf_pfil_hooked = 0; 216struct mtx pf_task_mtx; 217 218void 219init_pf_mutex(void) 220{ 221 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF); 222} 223 224void 225destroy_pf_mutex(void) 226{ 227 mtx_destroy(&pf_task_mtx); 228} 229 230void 231init_zone_var(void) 232{ 233 pf_src_tree_pl = pf_rule_pl = NULL; 234 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL; 235 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL; 236 pf_state_scrub_pl = NULL; 237 pfr_ktable_pl = pfr_kentry_pl = NULL; 238} 239 240void 241cleanup_pf_zone(void) 242{ 243 UMA_DESTROY(pf_src_tree_pl); 244 UMA_DESTROY(pf_rule_pl); 245 UMA_DESTROY(pf_state_pl); 246 UMA_DESTROY(pf_altq_pl); 247 UMA_DESTROY(pf_pooladdr_pl); 248 UMA_DESTROY(pf_frent_pl); 249 UMA_DESTROY(pf_frag_pl); 250 UMA_DESTROY(pf_cache_pl); 251 UMA_DESTROY(pf_cent_pl); 252 UMA_DESTROY(pfr_ktable_pl); 253 UMA_DESTROY(pfr_kentry_pl); 254 UMA_DESTROY(pf_state_scrub_pl); 255 UMA_DESTROY(pfi_addr_pl); 256} 257 258int 259pfattach(void) 260{ 261 u_int32_t *my_timeout = pf_default_rule.timeout; 262 int error = 1; 263 264 do { 265 UMA_CREATE(pf_src_tree_pl,struct 
pf_src_node, "pfsrctrpl"); 266 UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl"); 267 UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl"); 268 UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl"); 269 UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl"); 270 UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable"); 271 UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); 272 UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2"); 273 UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent"); 274 UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag"); 275 UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache"); 276 UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent"); 277 UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub, 278 "pfstatescrub"); 279 UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); 280 error = 0; 281 } while(0); 282 if (error) { 283 cleanup_pf_zone(); 284 return (error); 285 } 286 pfr_initialize(); 287 pfi_initialize(); 288 if ( (error = pf_osfp_initialize()) ) { 289 cleanup_pf_zone(); 290 pf_osfp_cleanup(); 291 return (error); 292 } 293 294 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl; 295 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 296 pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl; 297 pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 298 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl; 299 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 300 uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp, 301 pf_pool_limits[PF_LIMIT_STATES].limit); 302 303 RB_INIT(&tree_src_tracking); 304 RB_INIT(&pf_anchors); 305 pf_init_ruleset(&pf_main_ruleset); 306 TAILQ_INIT(&pf_altqs[0]); 307 TAILQ_INIT(&pf_altqs[1]); 308 TAILQ_INIT(&pf_pabuf); 309 pf_altqs_active = &pf_altqs[0]; 310 pf_altqs_inactive = &pf_altqs[1]; 311 TAILQ_INIT(&state_updates); 312 313 /* default rule should never be garbage collected */ 314 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 315 
pf_default_rule.action = PF_PASS; 316 pf_default_rule.nr = -1; 317 318 /* initialize default timeouts */ 319 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 320 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 321 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 322 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 323 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 324 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 325 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 326 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 327 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 328 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 329 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 330 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 331 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 332 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 333 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 334 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 335 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 336 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 337 338 callout_init(&pf_expire_to, NET_CALLOUT_MPSAFE); 339 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz, 340 pf_purge_timeout, &pf_expire_to); 341 342 pf_normalize_init(); 343 bzero(&pf_status, sizeof(pf_status)); 344 pf_pfil_hooked = 0; 345 346 /* XXX do our best to avoid a conflict */ 347 pf_status.hostid = arc4random(); 348 349 return (error); 350} 351#else /* !__FreeBSD__ */ 352void 353pfattach(int num) 354{ 355 u_int32_t *timeout = pf_default_rule.timeout; 356 357 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl", 358 &pool_allocator_nointr); 359 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0, 360 "pfsrctrpl", NULL); 361 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", 362 NULL); 363 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, 
"pfaltqpl", 364 &pool_allocator_nointr); 365 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0, 366 "pfpooladdrpl", &pool_allocator_nointr); 367 pfr_initialize(); 368 pfi_initialize(); 369 pf_osfp_initialize(); 370 371 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 372 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 373 374 RB_INIT(&tree_src_tracking); 375 RB_INIT(&pf_anchors); 376 pf_init_ruleset(&pf_main_ruleset); 377 TAILQ_INIT(&pf_altqs[0]); 378 TAILQ_INIT(&pf_altqs[1]); 379 TAILQ_INIT(&pf_pabuf); 380 pf_altqs_active = &pf_altqs[0]; 381 pf_altqs_inactive = &pf_altqs[1]; 382 TAILQ_INIT(&state_updates); 383 384 /* default rule should never be garbage collected */ 385 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 386 pf_default_rule.action = PF_PASS; 387 pf_default_rule.nr = -1; 388 389 /* initialize default timeouts */ 390 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 391 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 392 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 393 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 394 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 395 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 396 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 397 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 398 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 399 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 400 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 401 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 402 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 403 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 404 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 405 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 406 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 407 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 408 409 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to); 410 timeout_add(&pf_expire_to, 
timeout[PFTM_INTERVAL] * hz); 411 412 pf_normalize_init(); 413 bzero(&pf_status, sizeof(pf_status)); 414 pf_status.debug = PF_DEBUG_URGENT; 415 416 /* XXX do our best to avoid a conflict */ 417 pf_status.hostid = arc4random(); 418} 419 420int 421pfopen(struct cdev *dev, int flags, int fmt, struct proc *p) 422{ 423 if (minor(dev) >= 1) 424 return (ENXIO); 425 return (0); 426} 427 428int 429pfclose(struct cdev *dev, int flags, int fmt, struct proc *p) 430{ 431 if (minor(dev) >= 1) 432 return (ENXIO); 433 return (0); 434} 435#endif /* __FreeBSD__ */ 436 437struct pf_pool * 438pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, 439 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 440 u_int8_t check_ticket) 441{ 442 struct pf_ruleset *ruleset; 443 struct pf_rule *rule; 444 int rs_num; 445 446 ruleset = pf_find_ruleset(anchor); 447 if (ruleset == NULL) 448 return (NULL); 449 rs_num = pf_get_ruleset_number(rule_action); 450 if (rs_num >= PF_RULESET_MAX) 451 return (NULL); 452 if (active) { 453 if (check_ticket && ticket != 454 ruleset->rules[rs_num].active.ticket) 455 return (NULL); 456 if (r_last) 457 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 458 pf_rulequeue); 459 else 460 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 461 } else { 462 if (check_ticket && ticket != 463 ruleset->rules[rs_num].inactive.ticket) 464 return (NULL); 465 if (r_last) 466 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 467 pf_rulequeue); 468 else 469 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 470 } 471 if (!r_last) { 472 while ((rule != NULL) && (rule->nr != rule_number)) 473 rule = TAILQ_NEXT(rule, entries); 474 } 475 if (rule == NULL) 476 return (NULL); 477 478 return (&rule->rpool); 479} 480 481int 482pf_get_ruleset_number(u_int8_t action) 483{ 484 switch (action) { 485 case PF_SCRUB: 486 case PF_NOSCRUB: 487 return (PF_RULESET_SCRUB); 488 break; 489 case PF_PASS: 490 case PF_DROP: 491 return (PF_RULESET_FILTER); 492 break; 493 
case PF_NAT: 494 case PF_NONAT: 495 return (PF_RULESET_NAT); 496 break; 497 case PF_BINAT: 498 case PF_NOBINAT: 499 return (PF_RULESET_BINAT); 500 break; 501 case PF_RDR: 502 case PF_NORDR: 503 return (PF_RULESET_RDR); 504 break; 505 default: 506 return (PF_RULESET_MAX); 507 break; 508 } 509} 510 511void 512pf_init_ruleset(struct pf_ruleset *ruleset) 513{ 514 int i; 515 516 memset(ruleset, 0, sizeof(struct pf_ruleset)); 517 for (i = 0; i < PF_RULESET_MAX; i++) { 518 TAILQ_INIT(&ruleset->rules[i].queues[0]); 519 TAILQ_INIT(&ruleset->rules[i].queues[1]); 520 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0]; 521 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1]; 522 } 523} 524 525struct pf_anchor * 526pf_find_anchor(const char *path) 527{ 528 static struct pf_anchor key; 529 530 memset(&key, 0, sizeof(key)); 531 strlcpy(key.path, path, sizeof(key.path)); 532 return (RB_FIND(pf_anchor_global, &pf_anchors, &key)); 533} 534 535struct pf_ruleset * 536pf_find_ruleset(const char *path) 537{ 538 struct pf_anchor *anchor; 539 540 while (*path == '/') 541 path++; 542 if (!*path) 543 return (&pf_main_ruleset); 544 anchor = pf_find_anchor(path); 545 if (anchor == NULL) 546 return (NULL); 547 else 548 return (&anchor->ruleset); 549} 550 551struct pf_ruleset * 552pf_find_or_create_ruleset(const char *path) 553{ 554 static char p[MAXPATHLEN]; 555 char *q = NULL, *r; /* make the compiler happy */ 556 struct pf_ruleset *ruleset; 557 struct pf_anchor *anchor = NULL, *dup, *parent = NULL; 558 559 while (*path == '/') 560 path++; 561 ruleset = pf_find_ruleset(path); 562 if (ruleset != NULL) 563 return (ruleset); 564 strlcpy(p, path, sizeof(p)); 565#ifdef __FreeBSD__ 566 while (parent == NULL && (q = rindex(p, '/')) != NULL) { 567#else 568 while (parent == NULL && (q = strrchr(p, '/')) != NULL) { 569#endif 570 *q = 0; 571 if ((ruleset = pf_find_ruleset(p)) != NULL) { 572 parent = ruleset->anchor; 573 break; 574 } 575 } 576 if (q == NULL) 577 q = p; 578 else 579 
q++; 580 strlcpy(p, path, sizeof(p)); 581 if (!*q) 582 return (NULL); 583#ifdef __FreeBSD__ 584 while ((r = index(q, '/')) != NULL || *q) { 585#else 586 while ((r = strchr(q, '/')) != NULL || *q) { 587#endif 588 if (r != NULL) 589 *r = 0; 590 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE || 591 (parent != NULL && strlen(parent->path) >= 592 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)) 593 return (NULL); 594 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP, 595 M_NOWAIT); 596 if (anchor == NULL) 597 return (NULL); 598 memset(anchor, 0, sizeof(*anchor)); 599 RB_INIT(&anchor->children); 600 strlcpy(anchor->name, q, sizeof(anchor->name)); 601 if (parent != NULL) { 602 strlcpy(anchor->path, parent->path, 603 sizeof(anchor->path)); 604 strlcat(anchor->path, "/", sizeof(anchor->path)); 605 } 606 strlcat(anchor->path, anchor->name, sizeof(anchor->path)); 607 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) != 608 NULL) { 609 printf("pf_find_or_create_ruleset: RB_INSERT1 " 610 "'%s' '%s' collides with '%s' '%s'\n", 611 anchor->path, anchor->name, dup->path, dup->name); 612 free(anchor, M_TEMP); 613 return (NULL); 614 } 615 if (parent != NULL) { 616 anchor->parent = parent; 617 if ((dup = RB_INSERT(pf_anchor_node, &parent->children, 618 anchor)) != NULL) { 619 printf("pf_find_or_create_ruleset: " 620 "RB_INSERT2 '%s' '%s' collides with " 621 "'%s' '%s'\n", anchor->path, anchor->name, 622 dup->path, dup->name); 623 RB_REMOVE(pf_anchor_global, &pf_anchors, 624 anchor); 625 free(anchor, M_TEMP); 626 return (NULL); 627 } 628 } 629 pf_init_ruleset(&anchor->ruleset); 630 anchor->ruleset.anchor = anchor; 631 parent = anchor; 632 if (r != NULL) 633 q = r + 1; 634 else 635 *q = 0; 636 } 637 return (&anchor->ruleset); 638} 639 640void 641pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset) 642{ 643 struct pf_anchor *parent; 644 int i; 645 646 while (ruleset != NULL) { 647 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL || 648 
!RB_EMPTY(&ruleset->anchor->children) || 649 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 || 650 ruleset->topen) 651 return; 652 for (i = 0; i < PF_RULESET_MAX; ++i) 653 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) || 654 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) || 655 ruleset->rules[i].inactive.open) 656 return; 657 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); 658 if ((parent = ruleset->anchor->parent) != NULL) 659 RB_REMOVE(pf_anchor_node, &parent->children, 660 ruleset->anchor); 661 free(ruleset->anchor, M_TEMP); 662 if (parent == NULL) 663 return; 664 ruleset = &parent->ruleset; 665 } 666} 667 668int 669pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s, 670 const char *name) 671{ 672 static char *p, path[MAXPATHLEN]; 673 struct pf_ruleset *ruleset; 674 675 r->anchor = NULL; 676 r->anchor_relative = 0; 677 r->anchor_wildcard = 0; 678 if (!name[0]) 679 return (0); 680 if (name[0] == '/') 681 strlcpy(path, name + 1, sizeof(path)); 682 else { 683 /* relative path */ 684 r->anchor_relative = 1; 685 if (s->anchor == NULL || !s->anchor->path[0]) 686 path[0] = 0; 687 else 688 strlcpy(path, s->anchor->path, sizeof(path)); 689 while (name[0] == '.' && name[1] == '.' && name[2] == '/') { 690 if (!path[0]) { 691 printf("pf_anchor_setup: .. 
beyond root\n"); 692 return (1); 693 } 694#ifdef __FreeBSD__ 695 if ((p = rindex(path, '/')) != NULL) 696#else 697 if ((p = strrchr(path, '/')) != NULL) 698#endif 699 *p = 0; 700 else 701 path[0] = 0; 702 r->anchor_relative++; 703 name += 3; 704 } 705 if (path[0]) 706 strlcat(path, "/", sizeof(path)); 707 strlcat(path, name, sizeof(path)); 708 } 709#ifdef __FreeBSD__ 710 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) { 711#else 712 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) { 713#endif 714 r->anchor_wildcard = 1; 715 *p = 0; 716 } 717 ruleset = pf_find_or_create_ruleset(path); 718 if (ruleset == NULL || ruleset->anchor == NULL) { 719 printf("pf_anchor_setup: ruleset\n"); 720 return (1); 721 } 722 r->anchor = ruleset->anchor; 723 r->anchor->refcnt++; 724 return (0); 725} 726 727int 728pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r, 729 struct pfioc_rule *pr) 730{ 731 pr->anchor_call[0] = 0; 732 if (r->anchor == NULL) 733 return (0); 734 if (!r->anchor_relative) { 735 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call)); 736 strlcat(pr->anchor_call, r->anchor->path, 737 sizeof(pr->anchor_call)); 738 } else { 739 char a[MAXPATHLEN], b[MAXPATHLEN], *p; 740 int i; 741 742 if (rs->anchor == NULL) 743 a[0] = 0; 744 else 745 strlcpy(a, rs->anchor->path, sizeof(a)); 746 strlcpy(b, r->anchor->path, sizeof(b)); 747 for (i = 1; i < r->anchor_relative; ++i) { 748#ifdef __FreeBSD__ 749 if ((p = rindex(a, '/')) == NULL) 750#else 751 if ((p = strrchr(a, '/')) == NULL) 752#endif 753 p = a; 754 *p = 0; 755 strlcat(pr->anchor_call, "../", 756 sizeof(pr->anchor_call)); 757 } 758 if (strncmp(a, b, strlen(a))) { 759 printf("pf_anchor_copyout: '%s' '%s'\n", a, b); 760 return (1); 761 } 762 if (strlen(b) > strlen(a)) 763 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0), 764 sizeof(pr->anchor_call)); 765 } 766 if (r->anchor_wildcard) 767 strlcat(pr->anchor_call, pr->anchor_call[0] ? 
"/*" : "*", 768 sizeof(pr->anchor_call)); 769 return (0); 770} 771 772void 773pf_anchor_remove(struct pf_rule *r) 774{ 775 if (r->anchor == NULL) 776 return; 777 if (r->anchor->refcnt <= 0) { 778 printf("pf_anchor_remove: broken refcount"); 779 r->anchor = NULL; 780 return; 781 } 782 if (!--r->anchor->refcnt) 783 pf_remove_if_empty_ruleset(&r->anchor->ruleset); 784 r->anchor = NULL; 785} 786 787void 788pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 789{ 790 struct pf_pooladdr *mv_pool_pa; 791 792 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 793 TAILQ_REMOVE(poola, mv_pool_pa, entries); 794 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 795 } 796} 797 798void 799pf_empty_pool(struct pf_palist *poola) 800{ 801 struct pf_pooladdr *empty_pool_pa; 802 803 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 804 pfi_dynaddr_remove(&empty_pool_pa->addr); 805 pf_tbladdr_remove(&empty_pool_pa->addr); 806 pfi_detach_rule(empty_pool_pa->kif); 807 TAILQ_REMOVE(poola, empty_pool_pa, entries); 808 pool_put(&pf_pooladdr_pl, empty_pool_pa); 809 } 810} 811 812void 813pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 814{ 815 if (rulequeue != NULL) { 816 if (rule->states <= 0) { 817 /* 818 * XXX - we need to remove the table *before* detaching 819 * the rule to make sure the table code does not delete 820 * the anchor under our feet. 
821 */ 822 pf_tbladdr_remove(&rule->src.addr); 823 pf_tbladdr_remove(&rule->dst.addr); 824 if (rule->overload_tbl) 825 pfr_detach_table(rule->overload_tbl); 826 } 827 TAILQ_REMOVE(rulequeue, rule, entries); 828 rule->entries.tqe_prev = NULL; 829 rule->nr = -1; 830 } 831 832 if (rule->states > 0 || rule->src_nodes > 0 || 833 rule->entries.tqe_prev != NULL) 834 return; 835 pf_tag_unref(rule->tag); 836 pf_tag_unref(rule->match_tag); 837#ifdef ALTQ 838 if (rule->pqid != rule->qid) 839 pf_qid_unref(rule->pqid); 840 pf_qid_unref(rule->qid); 841#endif 842 pf_rtlabel_remove(&rule->src.addr); 843 pf_rtlabel_remove(&rule->dst.addr); 844 pfi_dynaddr_remove(&rule->src.addr); 845 pfi_dynaddr_remove(&rule->dst.addr); 846 if (rulequeue == NULL) { 847 pf_tbladdr_remove(&rule->src.addr); 848 pf_tbladdr_remove(&rule->dst.addr); 849 if (rule->overload_tbl) 850 pfr_detach_table(rule->overload_tbl); 851 } 852 pfi_detach_rule(rule->kif); 853 pf_anchor_remove(rule); 854 pf_empty_pool(&rule->rpool.list); 855 pool_put(&pf_rule_pl, rule); 856} 857 858static u_int16_t 859tagname2tag(struct pf_tags *head, char *tagname) 860{ 861 struct pf_tagname *tag, *p = NULL; 862 u_int16_t new_tagid = 1; 863 864 TAILQ_FOREACH(tag, head, entries) 865 if (strcmp(tagname, tag->name) == 0) { 866 tag->ref++; 867 return (tag->tag); 868 } 869 870 /* 871 * to avoid fragmentation, we do a linear search from the beginning 872 * and take the first free slot we find. if there is none or the list 873 * is empty, append a new entry at the end. 
874 */ 875 876 /* new entry */ 877 if (!TAILQ_EMPTY(head)) 878 for (p = TAILQ_FIRST(head); p != NULL && 879 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 880 new_tagid = p->tag + 1; 881 882 if (new_tagid > TAGID_MAX) 883 return (0); 884 885 /* allocate and fill new struct pf_tagname */ 886 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname), 887 M_TEMP, M_NOWAIT); 888 if (tag == NULL) 889 return (0); 890 bzero(tag, sizeof(struct pf_tagname)); 891 strlcpy(tag->name, tagname, sizeof(tag->name)); 892 tag->tag = new_tagid; 893 tag->ref++; 894 895 if (p != NULL) /* insert new entry before p */ 896 TAILQ_INSERT_BEFORE(p, tag, entries); 897 else /* either list empty or no free slot in between */ 898 TAILQ_INSERT_TAIL(head, tag, entries); 899 900 return (tag->tag); 901} 902 903static void 904tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 905{ 906 struct pf_tagname *tag; 907 908 TAILQ_FOREACH(tag, head, entries) 909 if (tag->tag == tagid) { 910 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 911 return; 912 } 913} 914 915static void 916tag_unref(struct pf_tags *head, u_int16_t tag) 917{ 918 struct pf_tagname *p, *next; 919 920 if (tag == 0) 921 return; 922 923 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 924 next = TAILQ_NEXT(p, entries); 925 if (tag == p->tag) { 926 if (--p->ref == 0) { 927 TAILQ_REMOVE(head, p, entries); 928 free(p, M_TEMP); 929 } 930 break; 931 } 932 } 933} 934 935u_int16_t 936pf_tagname2tag(char *tagname) 937{ 938 return (tagname2tag(&pf_tags, tagname)); 939} 940 941void 942pf_tag2tagname(u_int16_t tagid, char *p) 943{ 944 return (tag2tagname(&pf_tags, tagid, p)); 945} 946 947void 948pf_tag_ref(u_int16_t tag) 949{ 950 struct pf_tagname *t; 951 952 TAILQ_FOREACH(t, &pf_tags, entries) 953 if (t->tag == tag) 954 break; 955 if (t != NULL) 956 t->ref++; 957} 958 959void 960pf_tag_unref(u_int16_t tag) 961{ 962 return (tag_unref(&pf_tags, tag)); 963} 964 965int 966pf_rtlabel_add(struct pf_addr_wrap *a) 967{ 968#ifdef __FreeBSD__ 969 /* 
XXX_IMPORT: later */ 970 return (0); 971#else 972 if (a->type == PF_ADDR_RTLABEL && 973 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 974 return (-1); 975 return (0); 976#endif 977} 978 979void 980pf_rtlabel_remove(struct pf_addr_wrap *a) 981{ 982#ifdef __FreeBSD__ 983 /* XXX_IMPORT: later */ 984#else 985 if (a->type == PF_ADDR_RTLABEL) 986 rtlabel_unref(a->v.rtlabel); 987#endif 988} 989 990void 991pf_rtlabel_copyout(struct pf_addr_wrap *a) 992{ 993#ifdef __FreeBSD__ 994 /* XXX_IMPORT: later */ 995 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) 996 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname)); 997#else 998 const char *name; 999 1000 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 1001 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL) 1002 strlcpy(a->v.rtlabelname, "?", 1003 sizeof(a->v.rtlabelname)); 1004 else 1005 strlcpy(a->v.rtlabelname, name, 1006 sizeof(a->v.rtlabelname)); 1007 } 1008#endif 1009} 1010 1011#ifdef ALTQ 1012u_int32_t 1013pf_qname2qid(char *qname) 1014{ 1015 return ((u_int32_t)tagname2tag(&pf_qids, qname)); 1016} 1017 1018void 1019pf_qid2qname(u_int32_t qid, char *p) 1020{ 1021 return (tag2tagname(&pf_qids, (u_int16_t)qid, p)); 1022} 1023 1024void 1025pf_qid_unref(u_int32_t qid) 1026{ 1027 return (tag_unref(&pf_qids, (u_int16_t)qid)); 1028} 1029 1030int 1031pf_begin_altq(u_int32_t *ticket) 1032{ 1033 struct pf_altq *altq; 1034 int error = 0; 1035 1036 /* Purge the old altq list */ 1037 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1038 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1039 if (altq->qname[0] == 0) { 1040 /* detach and destroy the discipline */ 1041 error = altq_remove(altq); 1042 } else 1043 pf_qid_unref(altq->qid); 1044 pool_put(&pf_altq_pl, altq); 1045 } 1046 if (error) 1047 return (error); 1048 *ticket = ++ticket_altqs_inactive; 1049 altqs_inactive_open = 1; 1050 return (0); 1051} 1052 1053int 1054pf_rollback_altq(u_int32_t ticket) 1055{ 1056 struct pf_altq *altq; 1057 int error = 0; 1058 
1059 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1060 return (0); 1061 /* Purge the old altq list */ 1062 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1063 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1064 if (altq->qname[0] == 0) { 1065 /* detach and destroy the discipline */ 1066 error = altq_remove(altq); 1067 } else 1068 pf_qid_unref(altq->qid); 1069 pool_put(&pf_altq_pl, altq); 1070 } 1071 altqs_inactive_open = 0; 1072 return (error); 1073} 1074 1075int 1076pf_commit_altq(u_int32_t ticket) 1077{ 1078 struct pf_altqqueue *old_altqs; 1079 struct pf_altq *altq; 1080 int s, err, error = 0; 1081 1082 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 1083 return (EBUSY); 1084 1085 /* swap altqs, keep the old. */ 1086 s = splsoftnet(); 1087 old_altqs = pf_altqs_active; 1088 pf_altqs_active = pf_altqs_inactive; 1089 pf_altqs_inactive = old_altqs; 1090 ticket_altqs_active = ticket_altqs_inactive; 1091 1092 /* Attach new disciplines */ 1093 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1094 if (altq->qname[0] == 0) { 1095 /* attach the discipline */ 1096 error = altq_pfattach(altq); 1097 if (error == 0 && pf_altq_running) 1098 error = pf_enable_altq(altq); 1099 if (error != 0) { 1100 splx(s); 1101 return (error); 1102 } 1103 } 1104 } 1105 1106 /* Purge the old altq list */ 1107 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 1108 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 1109 if (altq->qname[0] == 0) { 1110 /* detach and destroy the discipline */ 1111 if (pf_altq_running) 1112 error = pf_disable_altq(altq); 1113 err = altq_pfdetach(altq); 1114 if (err != 0 && error == 0) 1115 error = err; 1116 err = altq_remove(altq); 1117 if (err != 0 && error == 0) 1118 error = err; 1119 } else 1120 pf_qid_unref(altq->qid); 1121 pool_put(&pf_altq_pl, altq); 1122 } 1123 splx(s); 1124 1125 altqs_inactive_open = 0; 1126 return (error); 1127} 1128 1129int 1130pf_enable_altq(struct pf_altq *altq) 1131{ 1132 struct ifnet *ifp; 
/*
 * (continuation: the head of pf_enable_altq() lies above this chunk)
 * Remainder of pf_enable_altq(): enable the queueing discipline on the
 * altq's interface and install its token-bucket regulator.
 */
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/* Enable the discipline only if one is actually attached. */
	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splimp();
#ifdef __FreeBSD__
		/* Drop the pf lock across tbr_set(); it is not pf-internal. */
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

/*
 * Disable ALTQ on the interface named in 'altq' and clear its
 * token-bucket regulator.  Returns EINVAL if the interface no longer
 * exists, 0 if the discipline was already superseded by a newer one,
 * otherwise the result of altq_disable()/tbr_set().
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splimp();
#ifdef __FreeBSD__
		/* Drop the pf lock across tbr_set(), as in pf_enable_altq(). */
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

/*
 * Open an inactive-ruleset transaction for (anchor, rs_num): flush any
 * rules left on the inactive queue by an aborted previous transaction,
 * bump the inactive ticket and return it through *ticket, and mark the
 * transaction open.  Returns EINVAL on a bad ruleset number or if the
 * ruleset cannot be found/created, 0 on success.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	/* Discard whatever a previous, uncommitted transaction left behind. */
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort the inactive-ruleset transaction identified by 'ticket': free
 * every rule queued on the inactive list and close the transaction.
 * A stale or unknown ticket is not an error (returns 0); EINVAL only
 * for an out-of-range ruleset number.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Commit the inactive-ruleset transaction identified by 'ticket':
 * at splsoftnet, swap the inactive rule queue into place as the active
 * one, carry the ticket over, recompute the skip steps, then free the
 * previously active rules.  Returns EINVAL on a bad ruleset number,
 * EBUSY on a closed transaction or ticket mismatch, 0 on success.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * pfioctl() -- ioctl entry point for /dev/pf (body continues beyond
 * this chunk).  The two leading switch() statements implement access
 * control: read-only/harmless commands are still permitted at raised
 * securelevel (FreeBSD: > 2, OpenBSD: > 1) and on descriptors opened
 * without FWRITE; everything else gets EPERM resp. EACCES.  Table
 * commands carrying PFR_FLAG_DUMMY are side-effect free and therefore
 * always allowed.
 */
#ifdef __FreeBSD__
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
#ifndef __FreeBSD__
	int			 s;
#endif
	int			 error = 0;

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 2))
#else
	if (securelevel > 1)
#endif
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCICLRISTATS:
#ifdef __FreeBSD__
		case DIOCGIFSPEED:
#endif
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return
(EPERM); 1338 default: 1339 return (EPERM); 1340 } 1341 1342 if (!(flags & FWRITE)) 1343 switch (cmd) { 1344 case DIOCGETRULES: 1345 case DIOCGETRULE: 1346 case DIOCGETADDRS: 1347 case DIOCGETADDR: 1348 case DIOCGETSTATE: 1349 case DIOCGETSTATUS: 1350 case DIOCGETSTATES: 1351 case DIOCGETTIMEOUT: 1352 case DIOCGETLIMIT: 1353 case DIOCGETALTQS: 1354 case DIOCGETALTQ: 1355 case DIOCGETQSTATS: 1356 case DIOCGETRULESETS: 1357 case DIOCGETRULESET: 1358 case DIOCRGETTABLES: 1359 case DIOCRGETTSTATS: 1360 case DIOCRGETADDRS: 1361 case DIOCRGETASTATS: 1362 case DIOCRTSTADDRS: 1363 case DIOCOSFPGET: 1364 case DIOCGETSRCNODES: 1365 case DIOCIGETIFACES: 1366#ifdef __FreeBSD__ 1367 case DIOCGIFSPEED: 1368#endif 1369 break; 1370 case DIOCRCLRTABLES: 1371 case DIOCRADDTABLES: 1372 case DIOCRDELTABLES: 1373 case DIOCRCLRTSTATS: 1374 case DIOCRCLRADDRS: 1375 case DIOCRADDADDRS: 1376 case DIOCRDELADDRS: 1377 case DIOCRSETADDRS: 1378 case DIOCRSETTFLAGS: 1379 if (((struct pfioc_table *)addr)->pfrio_flags & 1380 PFR_FLAG_DUMMY) 1381 break; /* dummy operation ok */ 1382 return (EACCES); 1383 default: 1384 return (EACCES); 1385 } 1386 1387#ifdef __FreeBSD__ 1388 PF_LOCK(); 1389#else 1390 s = splsoftnet(); 1391#endif 1392 switch (cmd) { 1393 1394 case DIOCSTART: 1395 if (pf_status.running) 1396 error = EEXIST; 1397 else { 1398#ifdef __FreeBSD__ 1399 PF_UNLOCK(); 1400 error = hook_pf(); 1401 PF_LOCK(); 1402 if (error) { 1403 DPFPRINTF(PF_DEBUG_MISC, 1404 ("pf: pfil registeration fail\n")); 1405 break; 1406 } 1407#endif 1408 pf_status.running = 1; 1409 pf_status.since = time_second; 1410 if (pf_status.stateid == 0) { 1411 pf_status.stateid = time_second; 1412 pf_status.stateid = pf_status.stateid << 32; 1413 } 1414 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1415 } 1416 break; 1417 1418 case DIOCSTOP: 1419 if (!pf_status.running) 1420 error = ENOENT; 1421 else { 1422 pf_status.running = 0; 1423#ifdef __FreeBSD__ 1424 PF_UNLOCK(); 1425 error = dehook_pf(); 1426 PF_LOCK(); 1427 if (error) 
{ 1428 pf_status.running = 1; 1429 DPFPRINTF(PF_DEBUG_MISC, 1430 ("pf: pfil unregisteration failed\n")); 1431 } 1432#endif 1433 pf_status.since = time_second; 1434 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1435 } 1436 break; 1437 1438 case DIOCADDRULE: { 1439 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1440 struct pf_ruleset *ruleset; 1441 struct pf_rule *rule, *tail; 1442 struct pf_pooladdr *pa; 1443 int rs_num; 1444 1445 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1446 ruleset = pf_find_ruleset(pr->anchor); 1447 if (ruleset == NULL) { 1448 error = EINVAL; 1449 break; 1450 } 1451 rs_num = pf_get_ruleset_number(pr->rule.action); 1452 if (rs_num >= PF_RULESET_MAX) { 1453 error = EINVAL; 1454 break; 1455 } 1456 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1457 error = EINVAL; 1458 break; 1459 } 1460 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1461 printf("ticket: %d != [%d]%d\n", pr->ticket, 1462 rs_num, ruleset->rules[rs_num].inactive.ticket); 1463 error = EBUSY; 1464 break; 1465 } 1466 if (pr->pool_ticket != ticket_pabuf) { 1467 printf("pool_ticket: %d != %d\n", pr->pool_ticket, 1468 ticket_pabuf); 1469 error = EBUSY; 1470 break; 1471 } 1472 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1473 if (rule == NULL) { 1474 error = ENOMEM; 1475 break; 1476 } 1477 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1478 rule->anchor = NULL; 1479 rule->kif = NULL; 1480 TAILQ_INIT(&rule->rpool.list); 1481 /* initialize refcounting */ 1482 rule->states = 0; 1483 rule->src_nodes = 0; 1484 rule->entries.tqe_prev = NULL; 1485#ifndef INET 1486 if (rule->af == AF_INET) { 1487 pool_put(&pf_rule_pl, rule); 1488 error = EAFNOSUPPORT; 1489 break; 1490 } 1491#endif /* INET */ 1492#ifndef INET6 1493 if (rule->af == AF_INET6) { 1494 pool_put(&pf_rule_pl, rule); 1495 error = EAFNOSUPPORT; 1496 break; 1497 } 1498#endif /* INET6 */ 1499 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1500 pf_rulequeue); 1501 if (tail) 1502 rule->nr = tail->nr + 1; 1503 else 1504 
rule->nr = 0; 1505 if (rule->ifname[0]) { 1506 rule->kif = pfi_attach_rule(rule->ifname); 1507 if (rule->kif == NULL) { 1508 pool_put(&pf_rule_pl, rule); 1509 error = EINVAL; 1510 break; 1511 } 1512 } 1513 1514#ifdef ALTQ 1515 /* set queue IDs */ 1516 if (rule->qname[0] != 0) { 1517 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1518 error = EBUSY; 1519 else if (rule->pqname[0] != 0) { 1520 if ((rule->pqid = 1521 pf_qname2qid(rule->pqname)) == 0) 1522 error = EBUSY; 1523 } else 1524 rule->pqid = rule->qid; 1525 } 1526#endif 1527 if (rule->tagname[0]) 1528 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1529 error = EBUSY; 1530 if (rule->match_tagname[0]) 1531 if ((rule->match_tag = 1532 pf_tagname2tag(rule->match_tagname)) == 0) 1533 error = EBUSY; 1534 if (rule->rt && !rule->direction) 1535 error = EINVAL; 1536 if (pf_rtlabel_add(&rule->src.addr) || 1537 pf_rtlabel_add(&rule->dst.addr)) 1538 error = EBUSY; 1539 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1540 error = EINVAL; 1541 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1542 error = EINVAL; 1543 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1544 error = EINVAL; 1545 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1546 error = EINVAL; 1547 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1548 error = EINVAL; 1549 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1550 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1551 error = EINVAL; 1552 1553 if (rule->overload_tblname[0]) { 1554 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1555 rule->overload_tblname)) == NULL) 1556 error = EINVAL; 1557 else 1558 rule->overload_tbl->pfrkt_flags |= 1559 PFR_TFLAG_ACTIVE; 1560 } 1561 1562 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1563 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1564 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1565 (rule->rt > PF_FASTROUTE)) && 1566 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1567 error = EINVAL; 1568 1569 if (error) { 1570 pf_rm_rule(NULL, rule); 
1571 break; 1572 } 1573 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1574 rule->evaluations = rule->packets = rule->bytes = 0; 1575 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1576 rule, entries); 1577 break; 1578 } 1579 1580 case DIOCGETRULES: { 1581 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1582 struct pf_ruleset *ruleset; 1583 struct pf_rule *tail; 1584 int rs_num; 1585 1586 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1587 ruleset = pf_find_ruleset(pr->anchor); 1588 if (ruleset == NULL) { 1589 error = EINVAL; 1590 break; 1591 } 1592 rs_num = pf_get_ruleset_number(pr->rule.action); 1593 if (rs_num >= PF_RULESET_MAX) { 1594 error = EINVAL; 1595 break; 1596 } 1597 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1598 pf_rulequeue); 1599 if (tail) 1600 pr->nr = tail->nr + 1; 1601 else 1602 pr->nr = 0; 1603 pr->ticket = ruleset->rules[rs_num].active.ticket; 1604 break; 1605 } 1606 1607 case DIOCGETRULE: { 1608 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1609 struct pf_ruleset *ruleset; 1610 struct pf_rule *rule; 1611 int rs_num, i; 1612 1613 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1614 ruleset = pf_find_ruleset(pr->anchor); 1615 if (ruleset == NULL) { 1616 error = EINVAL; 1617 break; 1618 } 1619 rs_num = pf_get_ruleset_number(pr->rule.action); 1620 if (rs_num >= PF_RULESET_MAX) { 1621 error = EINVAL; 1622 break; 1623 } 1624 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1625 error = EBUSY; 1626 break; 1627 } 1628 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1629 while ((rule != NULL) && (rule->nr != pr->nr)) 1630 rule = TAILQ_NEXT(rule, entries); 1631 if (rule == NULL) { 1632 error = EBUSY; 1633 break; 1634 } 1635 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1636 if (pf_anchor_copyout(ruleset, rule, pr)) { 1637 error = EBUSY; 1638 break; 1639 } 1640 pfi_dynaddr_copyout(&pr->rule.src.addr); 1641 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1642 pf_tbladdr_copyout(&pr->rule.src.addr); 1643 
pf_tbladdr_copyout(&pr->rule.dst.addr); 1644 pf_rtlabel_copyout(&pr->rule.src.addr); 1645 pf_rtlabel_copyout(&pr->rule.dst.addr); 1646 for (i = 0; i < PF_SKIP_COUNT; ++i) 1647 if (rule->skip[i].ptr == NULL) 1648 pr->rule.skip[i].nr = -1; 1649 else 1650 pr->rule.skip[i].nr = 1651 rule->skip[i].ptr->nr; 1652 break; 1653 } 1654 1655 case DIOCCHANGERULE: { 1656 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1657 struct pf_ruleset *ruleset; 1658 struct pf_rule *oldrule = NULL, *newrule = NULL; 1659 u_int32_t nr = 0; 1660 int rs_num; 1661 1662 if (!(pcr->action == PF_CHANGE_REMOVE || 1663 pcr->action == PF_CHANGE_GET_TICKET) && 1664 pcr->pool_ticket != ticket_pabuf) { 1665 error = EBUSY; 1666 break; 1667 } 1668 1669 if (pcr->action < PF_CHANGE_ADD_HEAD || 1670 pcr->action > PF_CHANGE_GET_TICKET) { 1671 error = EINVAL; 1672 break; 1673 } 1674 ruleset = pf_find_ruleset(pcr->anchor); 1675 if (ruleset == NULL) { 1676 error = EINVAL; 1677 break; 1678 } 1679 rs_num = pf_get_ruleset_number(pcr->rule.action); 1680 if (rs_num >= PF_RULESET_MAX) { 1681 error = EINVAL; 1682 break; 1683 } 1684 1685 if (pcr->action == PF_CHANGE_GET_TICKET) { 1686 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1687 break; 1688 } else { 1689 if (pcr->ticket != 1690 ruleset->rules[rs_num].active.ticket) { 1691 error = EINVAL; 1692 break; 1693 } 1694 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1695 error = EINVAL; 1696 break; 1697 } 1698 } 1699 1700 if (pcr->action != PF_CHANGE_REMOVE) { 1701 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1702 if (newrule == NULL) { 1703 error = ENOMEM; 1704 break; 1705 } 1706 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1707 TAILQ_INIT(&newrule->rpool.list); 1708 /* initialize refcounting */ 1709 newrule->states = 0; 1710 newrule->entries.tqe_prev = NULL; 1711#ifndef INET 1712 if (newrule->af == AF_INET) { 1713 pool_put(&pf_rule_pl, newrule); 1714 error = EAFNOSUPPORT; 1715 break; 1716 } 1717#endif /* INET */ 1718#ifndef INET6 1719 if (newrule->af 
== AF_INET6) { 1720 pool_put(&pf_rule_pl, newrule); 1721 error = EAFNOSUPPORT; 1722 break; 1723 } 1724#endif /* INET6 */ 1725 if (newrule->ifname[0]) { 1726 newrule->kif = pfi_attach_rule(newrule->ifname); 1727 if (newrule->kif == NULL) { 1728 pool_put(&pf_rule_pl, newrule); 1729 error = EINVAL; 1730 break; 1731 } 1732 } else 1733 newrule->kif = NULL; 1734 1735#ifdef ALTQ 1736 /* set queue IDs */ 1737 if (newrule->qname[0] != 0) { 1738 if ((newrule->qid = 1739 pf_qname2qid(newrule->qname)) == 0) 1740 error = EBUSY; 1741 else if (newrule->pqname[0] != 0) { 1742 if ((newrule->pqid = 1743 pf_qname2qid(newrule->pqname)) == 0) 1744 error = EBUSY; 1745 } else 1746 newrule->pqid = newrule->qid; 1747 } 1748#endif /* ALTQ */ 1749 if (newrule->tagname[0]) 1750 if ((newrule->tag = 1751 pf_tagname2tag(newrule->tagname)) == 0) 1752 error = EBUSY; 1753 if (newrule->match_tagname[0]) 1754 if ((newrule->match_tag = pf_tagname2tag( 1755 newrule->match_tagname)) == 0) 1756 error = EBUSY; 1757 if (newrule->rt && !newrule->direction) 1758 error = EINVAL; 1759 if (pf_rtlabel_add(&newrule->src.addr) || 1760 pf_rtlabel_add(&newrule->dst.addr)) 1761 error = EBUSY; 1762 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1763 error = EINVAL; 1764 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1765 error = EINVAL; 1766 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1767 error = EINVAL; 1768 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1769 error = EINVAL; 1770 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1771 error = EINVAL; 1772 1773 if (newrule->overload_tblname[0]) { 1774 if ((newrule->overload_tbl = pfr_attach_table( 1775 ruleset, newrule->overload_tblname)) == 1776 NULL) 1777 error = EINVAL; 1778 else 1779 newrule->overload_tbl->pfrkt_flags |= 1780 PFR_TFLAG_ACTIVE; 1781 } 1782 1783 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1784 if (((((newrule->action == PF_NAT) || 1785 (newrule->action == PF_RDR) || 1786 (newrule->action == PF_BINAT) || 1787 
(newrule->rt > PF_FASTROUTE)) && 1788 !pcr->anchor[0])) && 1789 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1790 error = EINVAL; 1791 1792 if (error) { 1793 pf_rm_rule(NULL, newrule); 1794 break; 1795 } 1796 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1797 newrule->evaluations = newrule->packets = 0; 1798 newrule->bytes = 0; 1799 } 1800 pf_empty_pool(&pf_pabuf); 1801 1802 if (pcr->action == PF_CHANGE_ADD_HEAD) 1803 oldrule = TAILQ_FIRST( 1804 ruleset->rules[rs_num].active.ptr); 1805 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1806 oldrule = TAILQ_LAST( 1807 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1808 else { 1809 oldrule = TAILQ_FIRST( 1810 ruleset->rules[rs_num].active.ptr); 1811 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1812 oldrule = TAILQ_NEXT(oldrule, entries); 1813 if (oldrule == NULL) { 1814 if (newrule != NULL) 1815 pf_rm_rule(NULL, newrule); 1816 error = EINVAL; 1817 break; 1818 } 1819 } 1820 1821 if (pcr->action == PF_CHANGE_REMOVE) 1822 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1823 else { 1824 if (oldrule == NULL) 1825 TAILQ_INSERT_TAIL( 1826 ruleset->rules[rs_num].active.ptr, 1827 newrule, entries); 1828 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1829 pcr->action == PF_CHANGE_ADD_BEFORE) 1830 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1831 else 1832 TAILQ_INSERT_AFTER( 1833 ruleset->rules[rs_num].active.ptr, 1834 oldrule, newrule, entries); 1835 } 1836 1837 nr = 0; 1838 TAILQ_FOREACH(oldrule, 1839 ruleset->rules[rs_num].active.ptr, entries) 1840 oldrule->nr = nr++; 1841 1842 ruleset->rules[rs_num].active.ticket++; 1843 1844 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1845 pf_remove_if_empty_ruleset(ruleset); 1846 1847 break; 1848 } 1849 1850 case DIOCCLRSTATES: { 1851 struct pf_state *state; 1852 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1853 int killed = 0; 1854 1855 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1856 if (!psk->psk_ifname[0] || 
!strcmp(psk->psk_ifname, 1857 state->u.s.kif->pfik_name)) { 1858 state->timeout = PFTM_PURGE; 1859#if NPFSYNC 1860 /* don't send out individual delete messages */ 1861 state->sync_flags = PFSTATE_NOSYNC; 1862#endif 1863 killed++; 1864 } 1865 } 1866 pf_purge_expired_states(); 1867 pf_status.states = 0; 1868 psk->psk_af = killed; 1869#if NPFSYNC 1870 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1871#endif 1872 break; 1873 } 1874 1875 case DIOCKILLSTATES: { 1876 struct pf_state *state; 1877 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1878 int killed = 0; 1879 1880 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1881 if ((!psk->psk_af || state->af == psk->psk_af) 1882 && (!psk->psk_proto || psk->psk_proto == 1883 state->proto) && 1884 PF_MATCHA(psk->psk_src.neg, 1885 &psk->psk_src.addr.v.a.addr, 1886 &psk->psk_src.addr.v.a.mask, 1887 &state->lan.addr, state->af) && 1888 PF_MATCHA(psk->psk_dst.neg, 1889 &psk->psk_dst.addr.v.a.addr, 1890 &psk->psk_dst.addr.v.a.mask, 1891 &state->ext.addr, state->af) && 1892 (psk->psk_src.port_op == 0 || 1893 pf_match_port(psk->psk_src.port_op, 1894 psk->psk_src.port[0], psk->psk_src.port[1], 1895 state->lan.port)) && 1896 (psk->psk_dst.port_op == 0 || 1897 pf_match_port(psk->psk_dst.port_op, 1898 psk->psk_dst.port[0], psk->psk_dst.port[1], 1899 state->ext.port)) && 1900 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1901 state->u.s.kif->pfik_name))) { 1902 state->timeout = PFTM_PURGE; 1903 killed++; 1904 } 1905 } 1906 pf_purge_expired_states(); 1907 psk->psk_af = killed; 1908 break; 1909 } 1910 1911 case DIOCADDSTATE: { 1912 struct pfioc_state *ps = (struct pfioc_state *)addr; 1913 struct pf_state *state; 1914 struct pfi_kif *kif; 1915 1916 if (ps->state.timeout >= PFTM_MAX && 1917 ps->state.timeout != PFTM_UNTIL_PACKET) { 1918 error = EINVAL; 1919 break; 1920 } 1921 state = pool_get(&pf_state_pl, PR_NOWAIT); 1922 if (state == NULL) { 1923 error = ENOMEM; 1924 break; 1925 } 1926 kif = 
pfi_lookup_create(ps->state.u.ifname); 1927 if (kif == NULL) { 1928 pool_put(&pf_state_pl, state); 1929 error = ENOENT; 1930 break; 1931 } 1932 bcopy(&ps->state, state, sizeof(struct pf_state)); 1933 bzero(&state->u, sizeof(state->u)); 1934 state->rule.ptr = &pf_default_rule; 1935 state->nat_rule.ptr = NULL; 1936 state->anchor.ptr = NULL; 1937 state->rt_kif = NULL; 1938 state->creation = time_second; 1939 state->pfsync_time = 0; 1940 state->packets[0] = state->packets[1] = 0; 1941 state->bytes[0] = state->bytes[1] = 0; 1942 1943 if (pf_insert_state(kif, state)) { 1944 pfi_maybe_destroy(kif); 1945 pool_put(&pf_state_pl, state); 1946 error = ENOMEM; 1947 } 1948 break; 1949 } 1950 1951 case DIOCGETSTATE: { 1952 struct pfioc_state *ps = (struct pfioc_state *)addr; 1953 struct pf_state *state; 1954 u_int32_t nr; 1955 1956 nr = 0; 1957 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1958 if (nr >= ps->nr) 1959 break; 1960 nr++; 1961 } 1962 if (state == NULL) { 1963 error = EBUSY; 1964 break; 1965 } 1966 bcopy(state, &ps->state, sizeof(struct pf_state)); 1967 ps->state.rule.nr = state->rule.ptr->nr; 1968 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 1969 -1 : state->nat_rule.ptr->nr; 1970 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
1971 -1 : state->anchor.ptr->nr; 1972 ps->state.expire = pf_state_expires(state); 1973 if (ps->state.expire > time_second) 1974 ps->state.expire -= time_second; 1975 else 1976 ps->state.expire = 0; 1977 break; 1978 } 1979 1980 case DIOCGETSTATES: { 1981 struct pfioc_states *ps = (struct pfioc_states *)addr; 1982 struct pf_state *state; 1983 struct pf_state *p, pstore; 1984 struct pfi_kif *kif; 1985 u_int32_t nr = 0; 1986 int space = ps->ps_len; 1987 1988 if (space == 0) { 1989 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1990 nr += kif->pfik_states; 1991 ps->ps_len = sizeof(struct pf_state) * nr; 1992 break; 1993 } 1994 1995 p = ps->ps_states; 1996 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1997 RB_FOREACH(state, pf_state_tree_ext_gwy, 1998 &kif->pfik_ext_gwy) { 1999 int secs = time_second; 2000 2001 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2002 break; 2003 2004 bcopy(state, &pstore, sizeof(pstore)); 2005 strlcpy(pstore.u.ifname, kif->pfik_name, 2006 sizeof(pstore.u.ifname)); 2007 pstore.rule.nr = state->rule.ptr->nr; 2008 pstore.nat_rule.nr = (state->nat_rule.ptr == 2009 NULL) ? -1 : state->nat_rule.ptr->nr; 2010 pstore.anchor.nr = (state->anchor.ptr == 2011 NULL) ? 
-1 : state->anchor.ptr->nr; 2012 pstore.creation = secs - pstore.creation; 2013 pstore.expire = pf_state_expires(state); 2014 if (pstore.expire > secs) 2015 pstore.expire -= secs; 2016 else 2017 pstore.expire = 0; 2018#ifdef __FreeBSD__ 2019 PF_COPYOUT(&pstore, p, sizeof(*p), error); 2020#else 2021 error = copyout(&pstore, p, sizeof(*p)); 2022#endif 2023 if (error) 2024 goto fail; 2025 p++; 2026 nr++; 2027 } 2028 ps->ps_len = sizeof(struct pf_state) * nr; 2029 break; 2030 } 2031 2032 case DIOCGETSTATUS: { 2033 struct pf_status *s = (struct pf_status *)addr; 2034 bcopy(&pf_status, s, sizeof(struct pf_status)); 2035 pfi_fill_oldstatus(s); 2036 break; 2037 } 2038 2039 case DIOCSETSTATUSIF: { 2040 struct pfioc_if *pi = (struct pfioc_if *)addr; 2041 2042 if (pi->ifname[0] == 0) { 2043 bzero(pf_status.ifname, IFNAMSIZ); 2044 break; 2045 } 2046 if (ifunit(pi->ifname) == NULL) { 2047 error = EINVAL; 2048 break; 2049 } 2050 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2051 break; 2052 } 2053 2054 case DIOCCLRSTATUS: { 2055 bzero(pf_status.counters, sizeof(pf_status.counters)); 2056 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2057 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2058 if (*pf_status.ifname) 2059 pfi_clr_istats(pf_status.ifname, NULL, 2060 PFI_FLAG_INSTANCE); 2061 break; 2062 } 2063 2064 case DIOCNATLOOK: { 2065 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2066 struct pf_state *state; 2067 struct pf_state key; 2068 int m = 0, direction = pnl->direction; 2069 2070 key.af = pnl->af; 2071 key.proto = pnl->proto; 2072 2073 if (!pnl->proto || 2074 PF_AZERO(&pnl->saddr, pnl->af) || 2075 PF_AZERO(&pnl->daddr, pnl->af) || 2076 !pnl->dport || !pnl->sport) 2077 error = EINVAL; 2078 else { 2079 /* 2080 * userland gives us source and dest of connection, 2081 * reverse the lookup so we ask for what happens with 2082 * the return traffic, enabling us to find it in the 2083 * state tree. 
2084 */ 2085 if (direction == PF_IN) { 2086 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2087 key.ext.port = pnl->dport; 2088 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2089 key.gwy.port = pnl->sport; 2090 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2091 } else { 2092 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2093 key.lan.port = pnl->dport; 2094 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2095 key.ext.port = pnl->sport; 2096 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2097 } 2098 if (m > 1) 2099 error = E2BIG; /* more than one state */ 2100 else if (state != NULL) { 2101 if (direction == PF_IN) { 2102 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 2103 state->af); 2104 pnl->rsport = state->lan.port; 2105 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2106 pnl->af); 2107 pnl->rdport = pnl->dport; 2108 } else { 2109 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 2110 state->af); 2111 pnl->rdport = state->gwy.port; 2112 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2113 pnl->af); 2114 pnl->rsport = pnl->sport; 2115 } 2116 } else 2117 error = ENOENT; 2118 } 2119 break; 2120 } 2121 2122 case DIOCSETTIMEOUT: { 2123 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2124 int old; 2125 2126 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2127 pt->seconds < 0) { 2128 error = EINVAL; 2129 goto fail; 2130 } 2131 old = pf_default_rule.timeout[pt->timeout]; 2132 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2133 pt->seconds = old; 2134 break; 2135 } 2136 2137 case DIOCGETTIMEOUT: { 2138 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2139 2140 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2141 error = EINVAL; 2142 goto fail; 2143 } 2144 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2145 break; 2146 } 2147 2148 case DIOCGETLIMIT: { 2149 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2150 2151 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2152 error = EINVAL; 2153 goto fail; 2154 } 2155 pl->limit = pf_pool_limits[pl->index].limit; 2156 break; 2157 } 2158 2159 case 
DIOCSETLIMIT: { 2160 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2161 int old_limit; 2162 2163 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2164 pf_pool_limits[pl->index].pp == NULL) { 2165 error = EINVAL; 2166 goto fail; 2167 } 2168#ifdef __FreeBSD__ 2169 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 2170#else 2171 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2172 pl->limit, NULL, 0) != 0) { 2173 error = EBUSY; 2174 goto fail; 2175 } 2176#endif 2177 old_limit = pf_pool_limits[pl->index].limit; 2178 pf_pool_limits[pl->index].limit = pl->limit; 2179 pl->limit = old_limit; 2180 break; 2181 } 2182 2183 case DIOCSETDEBUG: { 2184 u_int32_t *level = (u_int32_t *)addr; 2185 2186 pf_status.debug = *level; 2187 break; 2188 } 2189 2190 case DIOCCLRRULECTRS: { 2191 struct pf_ruleset *ruleset = &pf_main_ruleset; 2192 struct pf_rule *rule; 2193 2194 TAILQ_FOREACH(rule, 2195 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) 2196 rule->evaluations = rule->packets = 2197 rule->bytes = 0; 2198 break; 2199 } 2200 2201#ifdef __FreeBSD__ 2202 case DIOCGIFSPEED: { 2203 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2204 struct pf_ifspeed ps; 2205 struct ifnet *ifp; 2206 2207 if (psp->ifname[0] != 0) { 2208 /* Can we completely trust user-land? 
*/ 2209 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2210 ifp = ifunit(ps.ifname); 2211 if (ifp != NULL) 2212 psp->baudrate = ifp->if_baudrate; 2213 else 2214 error = EINVAL; 2215 } else 2216 error = EINVAL; 2217 break; 2218 } 2219#endif /* __FreeBSD__ */ 2220 2221#ifdef ALTQ 2222 case DIOCSTARTALTQ: { 2223 struct pf_altq *altq; 2224 2225 /* enable all altq interfaces on active list */ 2226 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2227 if (altq->qname[0] == 0) { 2228 error = pf_enable_altq(altq); 2229 if (error != 0) 2230 break; 2231 } 2232 } 2233 if (error == 0) 2234 pf_altq_running = 1; 2235 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2236 break; 2237 } 2238 2239 case DIOCSTOPALTQ: { 2240 struct pf_altq *altq; 2241 2242 /* disable all altq interfaces on active list */ 2243 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2244 if (altq->qname[0] == 0) { 2245 error = pf_disable_altq(altq); 2246 if (error != 0) 2247 break; 2248 } 2249 } 2250 if (error == 0) 2251 pf_altq_running = 0; 2252 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2253 break; 2254 } 2255 2256 case DIOCADDALTQ: { 2257 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2258 struct pf_altq *altq, *a; 2259 2260 if (pa->ticket != ticket_altqs_inactive) { 2261 error = EBUSY; 2262 break; 2263 } 2264 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2265 if (altq == NULL) { 2266 error = ENOMEM; 2267 break; 2268 } 2269 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2270 2271 /* 2272 * if this is for a queue, find the discipline and 2273 * copy the necessary fields 2274 */ 2275 if (altq->qname[0] != 0) { 2276 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2277 error = EBUSY; 2278 pool_put(&pf_altq_pl, altq); 2279 break; 2280 } 2281 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2282 if (strncmp(a->ifname, altq->ifname, 2283 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2284 altq->altq_disc = a->altq_disc; 2285 break; 2286 } 2287 } 2288 } 2289 2290#ifdef __FreeBSD__ 2291 PF_UNLOCK(); 2292#endif 2293 error = 
altq_add(altq); 2294#ifdef __FreeBSD__ 2295 PF_LOCK(); 2296#endif 2297 if (error) { 2298 pool_put(&pf_altq_pl, altq); 2299 break; 2300 } 2301 2302 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2303 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2304 break; 2305 } 2306 2307 case DIOCGETALTQS: { 2308 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2309 struct pf_altq *altq; 2310 2311 pa->nr = 0; 2312 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2313 pa->nr++; 2314 pa->ticket = ticket_altqs_active; 2315 break; 2316 } 2317 2318 case DIOCGETALTQ: { 2319 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2320 struct pf_altq *altq; 2321 u_int32_t nr; 2322 2323 if (pa->ticket != ticket_altqs_active) { 2324 error = EBUSY; 2325 break; 2326 } 2327 nr = 0; 2328 altq = TAILQ_FIRST(pf_altqs_active); 2329 while ((altq != NULL) && (nr < pa->nr)) { 2330 altq = TAILQ_NEXT(altq, entries); 2331 nr++; 2332 } 2333 if (altq == NULL) { 2334 error = EBUSY; 2335 break; 2336 } 2337 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2338 break; 2339 } 2340 2341 case DIOCCHANGEALTQ: 2342 /* CHANGEALTQ not supported yet! 
*/ 2343 error = ENODEV; 2344 break; 2345 2346 case DIOCGETQSTATS: { 2347 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2348 struct pf_altq *altq; 2349 u_int32_t nr; 2350 int nbytes; 2351 2352 if (pq->ticket != ticket_altqs_active) { 2353 error = EBUSY; 2354 break; 2355 } 2356 nbytes = pq->nbytes; 2357 nr = 0; 2358 altq = TAILQ_FIRST(pf_altqs_active); 2359 while ((altq != NULL) && (nr < pq->nr)) { 2360 altq = TAILQ_NEXT(altq, entries); 2361 nr++; 2362 } 2363 if (altq == NULL) { 2364 error = EBUSY; 2365 break; 2366 } 2367#ifdef __FreeBSD__ 2368 PF_UNLOCK(); 2369#endif 2370 error = altq_getqstats(altq, pq->buf, &nbytes); 2371#ifdef __FreeBSD__ 2372 PF_LOCK(); 2373#endif 2374 if (error == 0) { 2375 pq->scheduler = altq->scheduler; 2376 pq->nbytes = nbytes; 2377 } 2378 break; 2379 } 2380#endif /* ALTQ */ 2381 2382 case DIOCBEGINADDRS: { 2383 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2384 2385 pf_empty_pool(&pf_pabuf); 2386 pp->ticket = ++ticket_pabuf; 2387 break; 2388 } 2389 2390 case DIOCADDADDR: { 2391 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2392 2393#ifndef INET 2394 if (pp->af == AF_INET) { 2395 error = EAFNOSUPPORT; 2396 break; 2397 } 2398#endif /* INET */ 2399#ifndef INET6 2400 if (pp->af == AF_INET6) { 2401 error = EAFNOSUPPORT; 2402 break; 2403 } 2404#endif /* INET6 */ 2405 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2406 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2407 pp->addr.addr.type != PF_ADDR_TABLE) { 2408 error = EINVAL; 2409 break; 2410 } 2411 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2412 if (pa == NULL) { 2413 error = ENOMEM; 2414 break; 2415 } 2416 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2417 if (pa->ifname[0]) { 2418 pa->kif = pfi_attach_rule(pa->ifname); 2419 if (pa->kif == NULL) { 2420 pool_put(&pf_pooladdr_pl, pa); 2421 error = EINVAL; 2422 break; 2423 } 2424 } 2425 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2426 pfi_dynaddr_remove(&pa->addr); 2427 pfi_detach_rule(pa->kif); 2428 
pool_put(&pf_pooladdr_pl, pa); 2429 error = EINVAL; 2430 break; 2431 } 2432 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2433 break; 2434 } 2435 2436 case DIOCGETADDRS: { 2437 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2438 2439 pp->nr = 0; 2440 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2441 pp->r_num, 0, 1, 0); 2442 if (pool == NULL) { 2443 error = EBUSY; 2444 break; 2445 } 2446 TAILQ_FOREACH(pa, &pool->list, entries) 2447 pp->nr++; 2448 break; 2449 } 2450 2451 case DIOCGETADDR: { 2452 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2453 u_int32_t nr = 0; 2454 2455 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2456 pp->r_num, 0, 1, 1); 2457 if (pool == NULL) { 2458 error = EBUSY; 2459 break; 2460 } 2461 pa = TAILQ_FIRST(&pool->list); 2462 while ((pa != NULL) && (nr < pp->nr)) { 2463 pa = TAILQ_NEXT(pa, entries); 2464 nr++; 2465 } 2466 if (pa == NULL) { 2467 error = EBUSY; 2468 break; 2469 } 2470 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2471 pfi_dynaddr_copyout(&pp->addr.addr); 2472 pf_tbladdr_copyout(&pp->addr.addr); 2473 pf_rtlabel_copyout(&pp->addr.addr); 2474 break; 2475 } 2476 2477 case DIOCCHANGEADDR: { 2478 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2479 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2480 struct pf_ruleset *ruleset; 2481 2482 if (pca->action < PF_CHANGE_ADD_HEAD || 2483 pca->action > PF_CHANGE_REMOVE) { 2484 error = EINVAL; 2485 break; 2486 } 2487 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2488 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2489 pca->addr.addr.type != PF_ADDR_TABLE) { 2490 error = EINVAL; 2491 break; 2492 } 2493 2494 ruleset = pf_find_ruleset(pca->anchor); 2495 if (ruleset == NULL) { 2496 error = EBUSY; 2497 break; 2498 } 2499 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2500 pca->r_num, pca->r_last, 1, 1); 2501 if (pool == NULL) { 2502 error = EBUSY; 2503 break; 2504 } 2505 if (pca->action != PF_CHANGE_REMOVE) { 2506 newpa = 
pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2507 if (newpa == NULL) { 2508 error = ENOMEM; 2509 break; 2510 } 2511 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2512#ifndef INET 2513 if (pca->af == AF_INET) { 2514 pool_put(&pf_pooladdr_pl, newpa); 2515 error = EAFNOSUPPORT; 2516 break; 2517 } 2518#endif /* INET */ 2519#ifndef INET6 2520 if (pca->af == AF_INET6) { 2521 pool_put(&pf_pooladdr_pl, newpa); 2522 error = EAFNOSUPPORT; 2523 break; 2524 } 2525#endif /* INET6 */ 2526 if (newpa->ifname[0]) { 2527 newpa->kif = pfi_attach_rule(newpa->ifname); 2528 if (newpa->kif == NULL) { 2529 pool_put(&pf_pooladdr_pl, newpa); 2530 error = EINVAL; 2531 break; 2532 } 2533 } else 2534 newpa->kif = NULL; 2535 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2536 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2537 pfi_dynaddr_remove(&newpa->addr); 2538 pfi_detach_rule(newpa->kif); 2539 pool_put(&pf_pooladdr_pl, newpa); 2540 error = EINVAL; 2541 break; 2542 } 2543 } 2544 2545 if (pca->action == PF_CHANGE_ADD_HEAD) 2546 oldpa = TAILQ_FIRST(&pool->list); 2547 else if (pca->action == PF_CHANGE_ADD_TAIL) 2548 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2549 else { 2550 int i = 0; 2551 2552 oldpa = TAILQ_FIRST(&pool->list); 2553 while ((oldpa != NULL) && (i < pca->nr)) { 2554 oldpa = TAILQ_NEXT(oldpa, entries); 2555 i++; 2556 } 2557 if (oldpa == NULL) { 2558 error = EINVAL; 2559 break; 2560 } 2561 } 2562 2563 if (pca->action == PF_CHANGE_REMOVE) { 2564 TAILQ_REMOVE(&pool->list, oldpa, entries); 2565 pfi_dynaddr_remove(&oldpa->addr); 2566 pf_tbladdr_remove(&oldpa->addr); 2567 pfi_detach_rule(oldpa->kif); 2568 pool_put(&pf_pooladdr_pl, oldpa); 2569 } else { 2570 if (oldpa == NULL) 2571 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2572 else if (pca->action == PF_CHANGE_ADD_HEAD || 2573 pca->action == PF_CHANGE_ADD_BEFORE) 2574 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2575 else 2576 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2577 newpa, entries); 2578 } 2579 2580 pool->cur = 
TAILQ_FIRST(&pool->list); 2581 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2582 pca->af); 2583 break; 2584 } 2585 2586 case DIOCGETRULESETS: { 2587 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2588 struct pf_ruleset *ruleset; 2589 struct pf_anchor *anchor; 2590 2591 pr->path[sizeof(pr->path) - 1] = 0; 2592 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2593 error = EINVAL; 2594 break; 2595 } 2596 pr->nr = 0; 2597 if (ruleset->anchor == NULL) { 2598 /* XXX kludge for pf_main_ruleset */ 2599 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2600 if (anchor->parent == NULL) 2601 pr->nr++; 2602 } else { 2603 RB_FOREACH(anchor, pf_anchor_node, 2604 &ruleset->anchor->children) 2605 pr->nr++; 2606 } 2607 break; 2608 } 2609 2610 case DIOCGETRULESET: { 2611 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2612 struct pf_ruleset *ruleset; 2613 struct pf_anchor *anchor; 2614 u_int32_t nr = 0; 2615 2616 pr->path[sizeof(pr->path) - 1] = 0; 2617 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2618 error = EINVAL; 2619 break; 2620 } 2621 pr->name[0] = 0; 2622 if (ruleset->anchor == NULL) { 2623 /* XXX kludge for pf_main_ruleset */ 2624 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2625 if (anchor->parent == NULL && nr++ == pr->nr) { 2626 strlcpy(pr->name, anchor->name, 2627 sizeof(pr->name)); 2628 break; 2629 } 2630 } else { 2631 RB_FOREACH(anchor, pf_anchor_node, 2632 &ruleset->anchor->children) 2633 if (nr++ == pr->nr) { 2634 strlcpy(pr->name, anchor->name, 2635 sizeof(pr->name)); 2636 break; 2637 } 2638 } 2639 if (!pr->name[0]) 2640 error = EBUSY; 2641 break; 2642 } 2643 2644 case DIOCRCLRTABLES: { 2645 struct pfioc_table *io = (struct pfioc_table *)addr; 2646 2647 if (io->pfrio_esize != 0) { 2648 error = ENODEV; 2649 break; 2650 } 2651 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2652 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2653 break; 2654 } 2655 2656 case DIOCRADDTABLES: { 2657 struct pfioc_table *io = (struct 
pfioc_table *)addr; 2658 2659 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2660 error = ENODEV; 2661 break; 2662 } 2663 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2664 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2665 break; 2666 } 2667 2668 case DIOCRDELTABLES: { 2669 struct pfioc_table *io = (struct pfioc_table *)addr; 2670 2671 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2672 error = ENODEV; 2673 break; 2674 } 2675 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2676 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2677 break; 2678 } 2679 2680 case DIOCRGETTABLES: { 2681 struct pfioc_table *io = (struct pfioc_table *)addr; 2682 2683 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2684 error = ENODEV; 2685 break; 2686 } 2687 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2688 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2689 break; 2690 } 2691 2692 case DIOCRGETTSTATS: { 2693 struct pfioc_table *io = (struct pfioc_table *)addr; 2694 2695 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2696 error = ENODEV; 2697 break; 2698 } 2699 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2700 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2701 break; 2702 } 2703 2704 case DIOCRCLRTSTATS: { 2705 struct pfioc_table *io = (struct pfioc_table *)addr; 2706 2707 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2708 error = ENODEV; 2709 break; 2710 } 2711 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2712 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2713 break; 2714 } 2715 2716 case DIOCRSETTFLAGS: { 2717 struct pfioc_table *io = (struct pfioc_table *)addr; 2718 2719 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2720 error = ENODEV; 2721 break; 2722 } 2723 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2724 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2725 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2726 break; 2727 
} 2728 2729 case DIOCRCLRADDRS: { 2730 struct pfioc_table *io = (struct pfioc_table *)addr; 2731 2732 if (io->pfrio_esize != 0) { 2733 error = ENODEV; 2734 break; 2735 } 2736 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2737 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2738 break; 2739 } 2740 2741 case DIOCRADDADDRS: { 2742 struct pfioc_table *io = (struct pfioc_table *)addr; 2743 2744 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2745 error = ENODEV; 2746 break; 2747 } 2748 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2749 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2750 PFR_FLAG_USERIOCTL); 2751 break; 2752 } 2753 2754 case DIOCRDELADDRS: { 2755 struct pfioc_table *io = (struct pfioc_table *)addr; 2756 2757 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2758 error = ENODEV; 2759 break; 2760 } 2761 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2762 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2763 PFR_FLAG_USERIOCTL); 2764 break; 2765 } 2766 2767 case DIOCRSETADDRS: { 2768 struct pfioc_table *io = (struct pfioc_table *)addr; 2769 2770 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2771 error = ENODEV; 2772 break; 2773 } 2774 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2775 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2776 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2777 PFR_FLAG_USERIOCTL); 2778 break; 2779 } 2780 2781 case DIOCRGETADDRS: { 2782 struct pfioc_table *io = (struct pfioc_table *)addr; 2783 2784 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2785 error = ENODEV; 2786 break; 2787 } 2788 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2789 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2790 break; 2791 } 2792 2793 case DIOCRGETASTATS: { 2794 struct pfioc_table *io = (struct pfioc_table *)addr; 2795 2796 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2797 error = ENODEV; 2798 break; 2799 } 2800 error = pfr_get_astats(&io->pfrio_table, 
io->pfrio_buffer, 2801 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2802 break; 2803 } 2804 2805 case DIOCRCLRASTATS: { 2806 struct pfioc_table *io = (struct pfioc_table *)addr; 2807 2808 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2809 error = ENODEV; 2810 break; 2811 } 2812 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2813 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2814 PFR_FLAG_USERIOCTL); 2815 break; 2816 } 2817 2818 case DIOCRTSTADDRS: { 2819 struct pfioc_table *io = (struct pfioc_table *)addr; 2820 2821 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2822 error = ENODEV; 2823 break; 2824 } 2825 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2826 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2827 PFR_FLAG_USERIOCTL); 2828 break; 2829 } 2830 2831 case DIOCRINADEFINE: { 2832 struct pfioc_table *io = (struct pfioc_table *)addr; 2833 2834 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2835 error = ENODEV; 2836 break; 2837 } 2838 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2839 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2840 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2841 break; 2842 } 2843 2844 case DIOCOSFPADD: { 2845 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2846 error = pf_osfp_add(io); 2847 break; 2848 } 2849 2850 case DIOCOSFPGET: { 2851 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2852 error = pf_osfp_get(io); 2853 break; 2854 } 2855 2856 case DIOCXBEGIN: { 2857 struct pfioc_trans *io = (struct pfioc_trans *) 2858 addr; 2859 static struct pfioc_trans_e ioe; 2860 static struct pfr_table table; 2861 int i; 2862 2863 if (io->esize != sizeof(ioe)) { 2864 error = ENODEV; 2865 goto fail; 2866 } 2867 for (i = 0; i < io->size; i++) { 2868#ifdef __FreeBSD__ 2869 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2870 if (error) { 2871#else 2872 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2873#endif 2874 error = EFAULT; 2875 goto fail; 2876 } 
2877 switch (ioe.rs_num) { 2878#ifdef ALTQ 2879 case PF_RULESET_ALTQ: 2880 if (ioe.anchor[0]) { 2881 error = EINVAL; 2882 goto fail; 2883 } 2884 if ((error = pf_begin_altq(&ioe.ticket))) 2885 goto fail; 2886 break; 2887#endif /* ALTQ */ 2888 case PF_RULESET_TABLE: 2889 bzero(&table, sizeof(table)); 2890 strlcpy(table.pfrt_anchor, ioe.anchor, 2891 sizeof(table.pfrt_anchor)); 2892 if ((error = pfr_ina_begin(&table, 2893 &ioe.ticket, NULL, 0))) 2894 goto fail; 2895 break; 2896 default: 2897 if ((error = pf_begin_rules(&ioe.ticket, 2898 ioe.rs_num, ioe.anchor))) 2899 goto fail; 2900 break; 2901 } 2902#ifdef __FreeBSD__ 2903 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]), 2904 error); 2905 if (error) { 2906#else 2907 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2908#endif 2909 error = EFAULT; 2910 goto fail; 2911 } 2912 } 2913 break; 2914 } 2915 2916 case DIOCXROLLBACK: { 2917 struct pfioc_trans *io = (struct pfioc_trans *) 2918 addr; 2919 static struct pfioc_trans_e ioe; 2920 static struct pfr_table table; 2921 int i; 2922 2923 if (io->esize != sizeof(ioe)) { 2924 error = ENODEV; 2925 goto fail; 2926 } 2927 for (i = 0; i < io->size; i++) { 2928#ifdef __FreeBSD__ 2929 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2930 if (error) { 2931#else 2932 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2933#endif 2934 error = EFAULT; 2935 goto fail; 2936 } 2937 switch (ioe.rs_num) { 2938#ifdef ALTQ 2939 case PF_RULESET_ALTQ: 2940 if (ioe.anchor[0]) { 2941 error = EINVAL; 2942 goto fail; 2943 } 2944 if ((error = pf_rollback_altq(ioe.ticket))) 2945 goto fail; /* really bad */ 2946 break; 2947#endif /* ALTQ */ 2948 case PF_RULESET_TABLE: 2949 bzero(&table, sizeof(table)); 2950 strlcpy(table.pfrt_anchor, ioe.anchor, 2951 sizeof(table.pfrt_anchor)); 2952 if ((error = pfr_ina_rollback(&table, 2953 ioe.ticket, NULL, 0))) 2954 goto fail; /* really bad */ 2955 break; 2956 default: 2957 if ((error = pf_rollback_rules(ioe.ticket, 2958 ioe.rs_num, ioe.anchor))) 2959 goto 
fail; /* really bad */ 2960 break; 2961 } 2962 } 2963 break; 2964 } 2965 2966 case DIOCXCOMMIT: { 2967 struct pfioc_trans *io = (struct pfioc_trans *) 2968 addr; 2969 static struct pfioc_trans_e ioe; 2970 static struct pfr_table table; 2971 struct pf_ruleset *rs; 2972 int i; 2973 2974 if (io->esize != sizeof(ioe)) { 2975 error = ENODEV; 2976 goto fail; 2977 } 2978 /* first makes sure everything will succeed */ 2979 for (i = 0; i < io->size; i++) { 2980#ifdef __FreeBSD__ 2981 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2982 if (error) { 2983#else 2984 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2985#endif 2986 error = EFAULT; 2987 goto fail; 2988 } 2989 switch (ioe.rs_num) { 2990#ifdef ALTQ 2991 case PF_RULESET_ALTQ: 2992 if (ioe.anchor[0]) { 2993 error = EINVAL; 2994 goto fail; 2995 } 2996 if (!altqs_inactive_open || ioe.ticket != 2997 ticket_altqs_inactive) { 2998 error = EBUSY; 2999 goto fail; 3000 } 3001 break; 3002#endif /* ALTQ */ 3003 case PF_RULESET_TABLE: 3004 rs = pf_find_ruleset(ioe.anchor); 3005 if (rs == NULL || !rs->topen || ioe.ticket != 3006 rs->tticket) { 3007 error = EBUSY; 3008 goto fail; 3009 } 3010 break; 3011 default: 3012 if (ioe.rs_num < 0 || ioe.rs_num >= 3013 PF_RULESET_MAX) { 3014 error = EINVAL; 3015 goto fail; 3016 } 3017 rs = pf_find_ruleset(ioe.anchor); 3018 if (rs == NULL || 3019 !rs->rules[ioe.rs_num].inactive.open || 3020 rs->rules[ioe.rs_num].inactive.ticket != 3021 ioe.ticket) { 3022 error = EBUSY; 3023 goto fail; 3024 } 3025 break; 3026 } 3027 } 3028 /* now do the commit - no errors should happen here */ 3029 for (i = 0; i < io->size; i++) { 3030#ifdef __FreeBSD__ 3031 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3032 if (error) { 3033#else 3034 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3035#endif 3036 error = EFAULT; 3037 goto fail; 3038 } 3039 switch (ioe.rs_num) { 3040#ifdef ALTQ 3041 case PF_RULESET_ALTQ: 3042 if ((error = pf_commit_altq(ioe.ticket))) 3043 goto fail; /* really bad */ 3044 break; 3045#endif /* 
ALTQ */ 3046 case PF_RULESET_TABLE: 3047 bzero(&table, sizeof(table)); 3048 strlcpy(table.pfrt_anchor, ioe.anchor, 3049 sizeof(table.pfrt_anchor)); 3050 if ((error = pfr_ina_commit(&table, ioe.ticket, 3051 NULL, NULL, 0))) 3052 goto fail; /* really bad */ 3053 break; 3054 default: 3055 if ((error = pf_commit_rules(ioe.ticket, 3056 ioe.rs_num, ioe.anchor))) 3057 goto fail; /* really bad */ 3058 break; 3059 } 3060 } 3061 break; 3062 } 3063 3064 case DIOCGETSRCNODES: { 3065 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3066 struct pf_src_node *n; 3067 struct pf_src_node *p, pstore; 3068 u_int32_t nr = 0; 3069 int space = psn->psn_len; 3070 3071 if (space == 0) { 3072 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3073 nr++; 3074 psn->psn_len = sizeof(struct pf_src_node) * nr; 3075 break; 3076 } 3077 3078 p = psn->psn_src_nodes; 3079 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3080 int secs = time_second, diff; 3081 3082 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3083 break; 3084 3085 bcopy(n, &pstore, sizeof(pstore)); 3086 if (n->rule.ptr != NULL) 3087 pstore.rule.nr = n->rule.ptr->nr; 3088 pstore.creation = secs - pstore.creation; 3089 if (pstore.expire > secs) 3090 pstore.expire -= secs; 3091 else 3092 pstore.expire = 0; 3093 3094 /* adjust the connection rate estimate */ 3095 diff = secs - n->conn_rate.last; 3096 if (diff >= n->conn_rate.seconds) 3097 pstore.conn_rate.count = 0; 3098 else 3099 pstore.conn_rate.count -= 3100 n->conn_rate.count * diff / 3101 n->conn_rate.seconds; 3102 3103#ifdef __FreeBSD__ 3104 PF_COPYOUT(&pstore, p, sizeof(*p), error); 3105#else 3106 error = copyout(&pstore, p, sizeof(*p)); 3107#endif 3108 if (error) 3109 goto fail; 3110 p++; 3111 nr++; 3112 } 3113 psn->psn_len = sizeof(struct pf_src_node) * nr; 3114 break; 3115 } 3116 3117 case DIOCCLRSRCNODES: { 3118 struct pf_src_node *n; 3119 struct pf_state *state; 3120 3121 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3122 state->src_node = NULL; 3123 
state->nat_src_node = NULL; 3124 } 3125 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3126 n->expire = 1; 3127 n->states = 0; 3128 } 3129 pf_purge_expired_src_nodes(); 3130 pf_status.src_nodes = 0; 3131 break; 3132 } 3133 3134 case DIOCSETHOSTID: { 3135 u_int32_t *hostid = (u_int32_t *)addr; 3136 3137 if (*hostid == 0) 3138 pf_status.hostid = arc4random(); 3139 else 3140 pf_status.hostid = *hostid; 3141 break; 3142 } 3143 3144 case DIOCOSFPFLUSH: 3145 pf_osfp_flush(); 3146 break; 3147 3148 case DIOCIGETIFACES: { 3149 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3150 3151 if (io->pfiio_esize != sizeof(struct pfi_if)) { 3152 error = ENODEV; 3153 break; 3154 } 3155 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3156 &io->pfiio_size, io->pfiio_flags); 3157 break; 3158 } 3159 3160 case DIOCICLRISTATS: { 3161 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3162 3163 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero, 3164 io->pfiio_flags); 3165 break; 3166 } 3167 3168 case DIOCSETIFFLAG: { 3169 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3170 3171 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3172 break; 3173 } 3174 3175 case DIOCCLRIFFLAG: { 3176 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3177 3178 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3179 break; 3180 } 3181 3182 default: 3183 error = ENODEV; 3184 break; 3185 } 3186fail: 3187#ifdef __FreeBSD__ 3188 PF_UNLOCK(); 3189#else 3190 splx(s); 3191#endif 3192 return (error); 3193} 3194 3195#ifdef __FreeBSD__ 3196/* 3197 * XXX - Check for version missmatch!!! 
3198 */ 3199static void 3200pf_clear_states(void) 3201{ 3202 struct pf_state *state; 3203 3204 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3205 state->timeout = PFTM_PURGE; 3206#if NPFSYNC 3207 /* don't send out individual delete messages */ 3208 state->sync_flags = PFSTATE_NOSYNC; 3209#endif 3210 } 3211 pf_purge_expired_states(); 3212 pf_status.states = 0; 3213#if 0 /* NPFSYNC */ 3214/* 3215 * XXX This is called on module unload, we do not want to sync that over? */ 3216 */ 3217 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 3218#endif 3219} 3220 3221static int 3222pf_clear_tables(void) 3223{ 3224 struct pfioc_table io; 3225 int error; 3226 3227 bzero(&io, sizeof(io)); 3228 3229 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 3230 io.pfrio_flags); 3231 3232 return (error); 3233} 3234 3235static void 3236pf_clear_srcnodes(void) 3237{ 3238 struct pf_src_node *n; 3239 struct pf_state *state; 3240 3241 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3242 state->src_node = NULL; 3243 state->nat_src_node = NULL; 3244 } 3245 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3246 n->expire = 1; 3247 n->states = 0; 3248 } 3249 pf_purge_expired_src_nodes(); 3250 pf_status.src_nodes = 0; 3251} 3252/* 3253 * XXX - Check for version missmatch!!! 3254 */ 3255 3256/* 3257 * Duplicate pfctl -Fa operation to get rid of as much as we can. 3258 */ 3259static int 3260shutdown_pf(void) 3261{ 3262 int error = 0; 3263 u_int32_t t[5]; 3264 char nn = '\0'; 3265 3266 callout_stop(&pf_expire_to); 3267 3268 pf_status.running = 0; 3269 do { 3270 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 3271 != 0) { 3272 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 3273 break; 3274 } 3275 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 3276 != 0) { 3277 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 3278 break; /* XXX: rollback? 
*/ 3279 } 3280 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 3281 != 0) { 3282 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 3283 break; /* XXX: rollback? */ 3284 } 3285 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 3286 != 0) { 3287 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 3288 break; /* XXX: rollback? */ 3289 } 3290 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 3291 != 0) { 3292 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 3293 break; /* XXX: rollback? */ 3294 } 3295 3296 /* XXX: these should always succeed here */ 3297 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 3298 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 3299 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 3300 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 3301 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 3302 3303 if ((error = pf_clear_tables()) != 0) 3304 break; 3305 3306#ifdef ALTQ 3307 if ((error = pf_begin_altq(&t[0])) != 0) { 3308 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 3309 break; 3310 } 3311 pf_commit_altq(t[0]); 3312#endif 3313 3314 pf_clear_states(); 3315 3316 pf_clear_srcnodes(); 3317 3318 /* status does not use malloced mem so no need to cleanup */ 3319 /* fingerprints and interfaces have thier own cleanup code */ 3320 } while(0); 3321 3322 return (error); 3323} 3324 3325static int 3326pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3327 struct inpcb *inp) 3328{ 3329 /* 3330 * XXX Wed Jul 9 22:03:16 2003 UTC 3331 * OpenBSD has changed its byte ordering convention on ip_len/ip_off 3332 * in network stack. OpenBSD's network stack have converted 3333 * ip_len/ip_off to host byte order frist as FreeBSD. 3334 * Now this is not true anymore , so we should convert back to network 3335 * byte order. 3336 */ 3337 struct ip *h = NULL; 3338 int chk; 3339 3340 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) { 3341 /* if m_pkthdr.len is less than ip header, pf will handle. 
*/ 3342 h = mtod(*m, struct ip *); 3343 HTONS(h->ip_len); 3344 HTONS(h->ip_off); 3345 } 3346 chk = pf_test(PF_IN, ifp, m, NULL, inp); 3347 if (chk && *m) { 3348 m_freem(*m); 3349 *m = NULL; 3350 } 3351 if (*m != NULL) { 3352 /* pf_test can change ip header location */ 3353 h = mtod(*m, struct ip *); 3354 NTOHS(h->ip_len); 3355 NTOHS(h->ip_off); 3356 } 3357 return chk; 3358} 3359 3360static int 3361pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3362 struct inpcb *inp) 3363{ 3364 /* 3365 * XXX Wed Jul 9 22:03:16 2003 UTC 3366 * OpenBSD has changed its byte ordering convention on ip_len/ip_off 3367 * in network stack. OpenBSD's network stack have converted 3368 * ip_len/ip_off to host byte order frist as FreeBSD. 3369 * Now this is not true anymore , so we should convert back to network 3370 * byte order. 3371 */ 3372 struct ip *h = NULL; 3373 int chk; 3374 3375 /* We need a proper CSUM befor we start (s. OpenBSD ip_output) */ 3376 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 3377 in_delayed_cksum(*m); 3378 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 3379 } 3380 if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) { 3381 /* if m_pkthdr.len is less than ip header, pf will handle. */ 3382 h = mtod(*m, struct ip *); 3383 HTONS(h->ip_len); 3384 HTONS(h->ip_off); 3385 } 3386 chk = pf_test(PF_OUT, ifp, m, NULL, inp); 3387 if (chk && *m) { 3388 m_freem(*m); 3389 *m = NULL; 3390 } 3391 if (*m != NULL) { 3392 /* pf_test can change ip header location */ 3393 h = mtod(*m, struct ip *); 3394 NTOHS(h->ip_len); 3395 NTOHS(h->ip_off); 3396 } 3397 return chk; 3398} 3399 3400#ifdef INET6 3401static int 3402pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3403 struct inpcb *inp) 3404{ 3405 /* 3406 * IPv6 does not affected ip_len/ip_off byte order changes. 
3407 */ 3408 int chk; 3409 3410 chk = pf_test6(PF_IN, ifp, m, NULL, inp); 3411 if (chk && *m) { 3412 m_freem(*m); 3413 *m = NULL; 3414 } 3415 return chk; 3416} 3417 3418static int 3419pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 3420 struct inpcb *inp) 3421{ 3422 /* 3423 * IPv6 does not affected ip_len/ip_off byte order changes. 3424 */ 3425 int chk; 3426 3427 /* We need a proper CSUM befor we start (s. OpenBSD ip_output) */ 3428 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 3429 in_delayed_cksum(*m); 3430 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 3431 } 3432 chk = pf_test6(PF_OUT, ifp, m, NULL, inp); 3433 if (chk && *m) { 3434 m_freem(*m); 3435 *m = NULL; 3436 } 3437 return chk; 3438} 3439#endif /* INET6 */ 3440 3441static int 3442hook_pf(void) 3443{ 3444 struct pfil_head *pfh_inet; 3445#ifdef INET6 3446 struct pfil_head *pfh_inet6; 3447#endif 3448 3449 PF_ASSERT(MA_NOTOWNED); 3450 3451 if (pf_pfil_hooked) 3452 return (0); 3453 3454 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3455 if (pfh_inet == NULL) 3456 return (ESRCH); /* XXX */ 3457 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet); 3458 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet); 3459#ifdef INET6 3460 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3461 if (pfh_inet6 == NULL) { 3462 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3463 pfh_inet); 3464 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3465 pfh_inet); 3466 return (ESRCH); /* XXX */ 3467 } 3468 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6); 3469 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6); 3470#endif 3471 3472 pf_pfil_hooked = 1; 3473 return (0); 3474} 3475 3476static int 3477dehook_pf(void) 3478{ 3479 struct pfil_head *pfh_inet; 3480#ifdef INET6 3481 struct pfil_head *pfh_inet6; 3482#endif 3483 3484 PF_ASSERT(MA_NOTOWNED); 3485 3486 if (pf_pfil_hooked == 0) 3487 return (0); 3488 3489 pfh_inet 
= pfil_head_get(PFIL_TYPE_AF, AF_INET); 3490 if (pfh_inet == NULL) 3491 return (ESRCH); /* XXX */ 3492 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3493 pfh_inet); 3494 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3495 pfh_inet); 3496#ifdef INET6 3497 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3498 if (pfh_inet6 == NULL) 3499 return (ESRCH); /* XXX */ 3500 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, 3501 pfh_inet6); 3502 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, 3503 pfh_inet6); 3504#endif 3505 3506 pf_pfil_hooked = 0; 3507 return (0); 3508} 3509 3510static int 3511pf_load(void) 3512{ 3513 init_zone_var(); 3514 init_pf_mutex(); 3515 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); 3516 if (pfattach() < 0) { 3517 destroy_dev(pf_dev); 3518 destroy_pf_mutex(); 3519 return (ENOMEM); 3520 } 3521 return (0); 3522} 3523 3524static int 3525pf_unload(void) 3526{ 3527 int error = 0; 3528 3529 PF_LOCK(); 3530 pf_status.running = 0; 3531 PF_UNLOCK(); 3532 error = dehook_pf(); 3533 if (error) { 3534 /* 3535 * Should not happen! 3536 * XXX Due to error code ESRCH, kldunload will show 3537 * a message like 'No such process'. 
3538 */ 3539 printf("%s : pfil unregisteration fail\n", __FUNCTION__); 3540 return error; 3541 } 3542 PF_LOCK(); 3543 shutdown_pf(); 3544 pfi_cleanup(); 3545 pf_osfp_flush(); 3546 pf_osfp_cleanup(); 3547 cleanup_pf_zone(); 3548 PF_UNLOCK(); 3549 destroy_dev(pf_dev); 3550 destroy_pf_mutex(); 3551 return error; 3552} 3553 3554static int 3555pf_modevent(module_t mod, int type, void *data) 3556{ 3557 int error = 0; 3558 3559 switch(type) { 3560 case MOD_LOAD: 3561 error = pf_load(); 3562 break; 3563 3564 case MOD_UNLOAD: 3565 error = pf_unload(); 3566 break; 3567 default: 3568 error = EINVAL; 3569 break; 3570 } 3571 return error; 3572} 3573 3574static moduledata_t pf_mod = { 3575 "pf", 3576 pf_modevent, 3577 0 3578}; 3579 3580DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST); 3581MODULE_VERSION(pf, PF_MODVER); 3582#endif /* __FreeBSD__ */ 3583