/* pf_ioctl.c revision 149884 */
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 149884 2005-09-08 15:06:52Z mlaier $ */ 2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */ 3 4/* 5 * Copyright (c) 2001 Daniel Hartmeier 6 * Copyright (c) 2002,2003 Henning Brauer 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
36 * 37 */ 38 39#ifdef __FreeBSD__ 40#include "opt_inet.h" 41#include "opt_inet6.h" 42#endif 43 44#ifdef __FreeBSD__ 45#include "opt_bpf.h" 46#include "opt_pf.h" 47#define NBPFILTER DEV_BPF 48#define NPFLOG DEV_PFLOG 49#define NPFSYNC DEV_PFSYNC 50#else 51#include "bpfilter.h" 52#include "pflog.h" 53#include "pfsync.h" 54#endif 55 56#include <sys/param.h> 57#include <sys/systm.h> 58#include <sys/mbuf.h> 59#include <sys/filio.h> 60#include <sys/fcntl.h> 61#include <sys/socket.h> 62#include <sys/socketvar.h> 63#include <sys/kernel.h> 64#include <sys/time.h> 65#include <sys/malloc.h> 66#ifdef __FreeBSD__ 67#include <sys/module.h> 68#include <sys/conf.h> 69#include <sys/proc.h> 70#else 71#include <sys/timeout.h> 72#include <sys/pool.h> 73#endif 74 75#include <net/if.h> 76#include <net/if_types.h> 77#include <net/route.h> 78 79#include <netinet/in.h> 80#include <netinet/in_var.h> 81#include <netinet/in_systm.h> 82#include <netinet/ip.h> 83#include <netinet/ip_var.h> 84#include <netinet/ip_icmp.h> 85 86#ifndef __FreeBSD__ 87#include <dev/rndvar.h> 88#endif 89#include <net/pfvar.h> 90 91#if NPFSYNC > 0 92#include <net/if_pfsync.h> 93#endif /* NPFSYNC > 0 */ 94 95#ifdef INET6 96#include <netinet/ip6.h> 97#include <netinet/in_pcb.h> 98#endif /* INET6 */ 99 100#ifdef ALTQ 101#include <altq/altq.h> 102#endif 103 104#ifdef __FreeBSD__ 105#include <sys/limits.h> 106#include <sys/lock.h> 107#include <sys/mutex.h> 108#include <net/pfil.h> 109#endif /* __FreeBSD__ */ 110 111#ifdef __FreeBSD__ 112void init_zone_var(void); 113void cleanup_pf_zone(void); 114int pfattach(void); 115#else 116void pfattach(int); 117int pfopen(dev_t, int, int, struct proc *); 118int pfclose(dev_t, int, int, struct proc *); 119#endif 120struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 121 u_int8_t, u_int8_t, u_int8_t); 122int pf_get_ruleset_number(u_int8_t); 123void pf_init_ruleset(struct pf_ruleset *); 124int pf_anchor_setup(struct pf_rule *, 125 const struct pf_ruleset *, const char 
*); 126int pf_anchor_copyout(const struct pf_ruleset *, 127 const struct pf_rule *, struct pfioc_rule *); 128void pf_anchor_remove(struct pf_rule *); 129 130void pf_mv_pool(struct pf_palist *, struct pf_palist *); 131void pf_empty_pool(struct pf_palist *); 132#ifdef __FreeBSD__ 133int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 134#else 135int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *); 136#endif 137#ifdef ALTQ 138int pf_begin_altq(u_int32_t *); 139int pf_rollback_altq(u_int32_t); 140int pf_commit_altq(u_int32_t); 141int pf_enable_altq(struct pf_altq *); 142int pf_disable_altq(struct pf_altq *); 143#endif /* ALTQ */ 144int pf_begin_rules(u_int32_t *, int, const char *); 145int pf_rollback_rules(u_int32_t, int, char *); 146int pf_commit_rules(u_int32_t, int, char *); 147 148#ifdef __FreeBSD__ 149extern struct callout pf_expire_to; 150#else 151extern struct timeout pf_expire_to; 152#endif 153 154struct pf_rule pf_default_rule; 155#ifdef ALTQ 156static int pf_altq_running; 157#endif 158 159#define TAGID_MAX 50000 160TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 161 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 162 163#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 164#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 165#endif 166static u_int16_t tagname2tag(struct pf_tags *, char *); 167static void tag2tagname(struct pf_tags *, u_int16_t, char *); 168static void tag_unref(struct pf_tags *, u_int16_t); 169int pf_rtlabel_add(struct pf_addr_wrap *); 170void pf_rtlabel_remove(struct pf_addr_wrap *); 171void pf_rtlabel_copyout(struct pf_addr_wrap *); 172 173#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 174 175 176#ifdef __FreeBSD__ 177static struct cdev *pf_dev; 178 179/* 180 * XXX - These are new and need to be checked when moveing to a new version 181 */ 182static void pf_clear_states(void); 183static int pf_clear_tables(void); 184static void pf_clear_srcnodes(void); 185/* 186 * XXX - These are new 
/*
 * Tear down every UMA zone created by pfattach()/init_zone_var().
 * UMA_DESTROY presumably tolerates a NULL zone pointer (zones are
 * NULLed in init_zone_var() and this is called on partial-init
 * failure) -- TODO confirm against the macro's definition in pfvar.h.
 */
void
cleanup_pf_zone(void)
{
	UMA_DESTROY(pf_src_tree_pl);
	UMA_DESTROY(pf_rule_pl);
	UMA_DESTROY(pf_state_pl);
	UMA_DESTROY(pf_altq_pl);
	UMA_DESTROY(pf_pooladdr_pl);
	UMA_DESTROY(pf_frent_pl);
	UMA_DESTROY(pf_frag_pl);
	UMA_DESTROY(pf_cache_pl);
	UMA_DESTROY(pf_cent_pl);
	UMA_DESTROY(pfr_ktable_pl);
	UMA_DESTROY(pfr_kentry_pl);
	UMA_DESTROY(pf_state_scrub_pl);
	UMA_DESTROY(pfi_addr_pl);
}
/*
 * FreeBSD attach routine: create the UMA zones backing pf's object
 * pools, initialize the table/interface/OS-fingerprint subsystems,
 * set pool limits, seed the default rule and its timeouts, and start
 * the state-expiry callout.  Returns 0 on success or an errno from a
 * failed sub-initialization.
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	/*
	 * NOTE(review): the do/while(0) wrapper sets error = 0
	 * unconditionally after the UMA_CREATEs, so the "if (error)"
	 * branch below can only fire if UMA_CREATE itself bails out of
	 * the block -- presumably via a goto/continue inside the macro;
	 * confirm against UMA_CREATE's definition.
	 */
	do {
		UMA_CREATE(pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
		UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
		UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
		UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
		UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
		UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
		UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
		UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
		UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
		UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	if ( (error = pf_osfp_initialize()) ) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* wire the pool-limit table to the zones and apply the state cap */
	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
	pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit);

	/* empty global trees/queues; active/inactive altq lists alternate */
	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* arm the periodic state/fragment purge */
	callout_init(&pf_expire_to, NET_CALLOUT_MPSAFE);
	callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	return (error);
}
/*
 * OpenBSD attach routine: mirror of the FreeBSD pfattach() above but
 * using pool(9) instead of UMA and timeout(9) instead of callout(9).
 * The `num' argument (number of devices) is unused here.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* cap the number of states the state pool may hand out */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* arm the periodic purge timeout */
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
/*
 * Look up the address pool of one rule inside the given anchor.
 *
 * anchor       anchor path; resolved via pf_find_ruleset()
 * ticket       transaction ticket; compared against the ruleset's
 *              ticket when check_ticket is nonzero
 * rule_action  rule action, mapped to a ruleset index
 * rule_number  rule number to search for (ignored when r_last is set)
 * r_last       nonzero: take the last rule of the queue instead of
 *              searching by number
 * active       nonzero: search the active rule queue, else inactive
 * check_ticket nonzero: fail on ticket mismatch
 *
 * Returns a pointer to the rule's rpool, or NULL if the anchor,
 * ruleset, ticket, or rule cannot be matched.
 */
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	/* when not taking the last rule, walk forward to rule_number */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
case PF_NAT: 494 case PF_NONAT: 495 return (PF_RULESET_NAT); 496 break; 497 case PF_BINAT: 498 case PF_NOBINAT: 499 return (PF_RULESET_BINAT); 500 break; 501 case PF_RDR: 502 case PF_NORDR: 503 return (PF_RULESET_RDR); 504 break; 505 default: 506 return (PF_RULESET_MAX); 507 break; 508 } 509} 510 511void 512pf_init_ruleset(struct pf_ruleset *ruleset) 513{ 514 int i; 515 516 memset(ruleset, 0, sizeof(struct pf_ruleset)); 517 for (i = 0; i < PF_RULESET_MAX; i++) { 518 TAILQ_INIT(&ruleset->rules[i].queues[0]); 519 TAILQ_INIT(&ruleset->rules[i].queues[1]); 520 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0]; 521 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1]; 522 } 523} 524 525struct pf_anchor * 526pf_find_anchor(const char *path) 527{ 528 static struct pf_anchor key; 529 530 memset(&key, 0, sizeof(key)); 531 strlcpy(key.path, path, sizeof(key.path)); 532 return (RB_FIND(pf_anchor_global, &pf_anchors, &key)); 533} 534 535struct pf_ruleset * 536pf_find_ruleset(const char *path) 537{ 538 struct pf_anchor *anchor; 539 540 while (*path == '/') 541 path++; 542 if (!*path) 543 return (&pf_main_ruleset); 544 anchor = pf_find_anchor(path); 545 if (anchor == NULL) 546 return (NULL); 547 else 548 return (&anchor->ruleset); 549} 550 551struct pf_ruleset * 552pf_find_or_create_ruleset(const char *path) 553{ 554 static char p[MAXPATHLEN]; 555 char *q = NULL, *r; /* make the compiler happy */ 556 struct pf_ruleset *ruleset; 557 struct pf_anchor *anchor = NULL, *dup, *parent = NULL; 558 559 while (*path == '/') 560 path++; 561 ruleset = pf_find_ruleset(path); 562 if (ruleset != NULL) 563 return (ruleset); 564 strlcpy(p, path, sizeof(p)); 565#ifdef __FreeBSD__ 566 while (parent == NULL && (q = rindex(p, '/')) != NULL) { 567#else 568 while (parent == NULL && (q = strrchr(p, '/')) != NULL) { 569#endif 570 *q = 0; 571 if ((ruleset = pf_find_ruleset(p)) != NULL) { 572 parent = ruleset->anchor; 573 break; 574 } 575 } 576 if (q == NULL) 577 q = p; 578 else 579 
/*
 * Return the ruleset for `path', creating any missing anchors along
 * the way.  First finds the deepest existing ancestor, then walks the
 * remaining path components, allocating one pf_anchor per component
 * and inserting it into both the global anchor tree and its parent's
 * children tree.  Returns NULL on bad input, allocation failure, or
 * tree-insert collision.
 *
 * NOTE(review): `p' is static, so this routine is not reentrant --
 * presumably serialized by the pf lock; confirm at call sites.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL, *r;	/* make the compiler happy */
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL, *dup, *parent = NULL;

	while (*path == '/')
		path++;
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	strlcpy(p, path, sizeof(p));
	/* strip trailing components until an existing ruleset is found */
#ifdef __FreeBSD__
	while (parent == NULL && (q = rindex(p, '/')) != NULL) {
#else
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
#endif
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now points at the first component that must be created */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* create one anchor per remaining '/'-separated component */
#ifdef __FreeBSD__
	while ((r = index(q, '/')) != NULL || *q) {
#else
	while ((r = strchr(q, '/')) != NULL || *q) {
#endif
		if (r != NULL)
			*r = 0;
		/* reject empty/oversized names and paths that cannot fit */
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				/* undo the global insert before bailing */
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;
		/* advance past this component, or terminate the loop */
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;
	}
	return (&anchor->ruleset);
}
/*
 * Garbage-collect an anchor's ruleset if it is completely unused:
 * no child anchors, no references, no tables, no open table or rule
 * transactions, and all rule queues empty.  After freeing an anchor
 * the loop climbs to the parent, which may itself have become empty.
 * The main ruleset is never removed.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		/* keep the ruleset if anything still uses it */
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* unlink from both trees, then free the anchor */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		/* parent may now be empty too; retry one level up */
		ruleset = &parent->ruleset;
	}
}
/*
 * Reconstruct the user-visible anchor call string (pr->anchor_call)
 * for rule `r' as seen from ruleset `rs'.  Absolute references are
 * emitted as "/<path>"; relative ones are rebuilt as a series of
 * "../" steps followed by the path remainder.  A trailing "/*" (or
 * bare "*") is appended for wildcard anchors.  Returns 0 on success,
 * 1 if the stored paths are inconsistent.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		/* absolute reference: "/" + full anchor path */
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char	 a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int	 i;

		/* a = caller's path, b = target's path */
		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* strip one trailing component of `a' per "../" level */
		for (i = 1; i < r->anchor_relative; ++i) {
#ifdef __FreeBSD__
			if ((p = rindex(a, '/')) == NULL)
#else
			if ((p = strrchr(a, '/')) == NULL)
#endif
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* after stripping, `a' must be a prefix of `b' */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		/* append the part of `b' below the common prefix */
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
"/*" : "*", 768 sizeof(pr->anchor_call)); 769 return (0); 770} 771 772void 773pf_anchor_remove(struct pf_rule *r) 774{ 775 if (r->anchor == NULL) 776 return; 777 if (r->anchor->refcnt <= 0) { 778 printf("pf_anchor_remove: broken refcount"); 779 r->anchor = NULL; 780 return; 781 } 782 if (!--r->anchor->refcnt) 783 pf_remove_if_empty_ruleset(&r->anchor->ruleset); 784 r->anchor = NULL; 785} 786 787void 788pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 789{ 790 struct pf_pooladdr *mv_pool_pa; 791 792 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 793 TAILQ_REMOVE(poola, mv_pool_pa, entries); 794 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 795 } 796} 797 798void 799pf_empty_pool(struct pf_palist *poola) 800{ 801 struct pf_pooladdr *empty_pool_pa; 802 803 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 804 pfi_dynaddr_remove(&empty_pool_pa->addr); 805 pf_tbladdr_remove(&empty_pool_pa->addr); 806 pfi_detach_rule(empty_pool_pa->kif); 807 TAILQ_REMOVE(poola, empty_pool_pa, entries); 808 pool_put(&pf_pooladdr_pl, empty_pool_pa); 809 } 810} 811 812void 813pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 814{ 815 if (rulequeue != NULL) { 816 if (rule->states <= 0) { 817 /* 818 * XXX - we need to remove the table *before* detaching 819 * the rule to make sure the table code does not delete 820 * the anchor under our feet. 
/*
 * Look up `tagname' in `head', returning its numeric tag and bumping
 * the refcount.  Unknown names get a new entry: the list is kept
 * sorted by tag, and the first gap in the tag sequence is reused to
 * avoid fragmentation.  Returns 0 on exhaustion (tag > TAGID_MAX) or
 * allocation failure; valid tags start at 1.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* loop stops at the first hole; p marks the successor */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
874 */ 875 876 /* new entry */ 877 if (!TAILQ_EMPTY(head)) 878 for (p = TAILQ_FIRST(head); p != NULL && 879 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 880 new_tagid = p->tag + 1; 881 882 if (new_tagid > TAGID_MAX) 883 return (0); 884 885 /* allocate and fill new struct pf_tagname */ 886 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname), 887 M_TEMP, M_NOWAIT); 888 if (tag == NULL) 889 return (0); 890 bzero(tag, sizeof(struct pf_tagname)); 891 strlcpy(tag->name, tagname, sizeof(tag->name)); 892 tag->tag = new_tagid; 893 tag->ref++; 894 895 if (p != NULL) /* insert new entry before p */ 896 TAILQ_INSERT_BEFORE(p, tag, entries); 897 else /* either list empty or no free slot in between */ 898 TAILQ_INSERT_TAIL(head, tag, entries); 899 900 return (tag->tag); 901} 902 903static void 904tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 905{ 906 struct pf_tagname *tag; 907 908 TAILQ_FOREACH(tag, head, entries) 909 if (tag->tag == tagid) { 910 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 911 return; 912 } 913} 914 915static void 916tag_unref(struct pf_tags *head, u_int16_t tag) 917{ 918 struct pf_tagname *p, *next; 919 920 if (tag == 0) 921 return; 922 923 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 924 next = TAILQ_NEXT(p, entries); 925 if (tag == p->tag) { 926 if (--p->ref == 0) { 927 TAILQ_REMOVE(head, p, entries); 928 free(p, M_TEMP); 929 } 930 break; 931 } 932 } 933} 934 935u_int16_t 936pf_tagname2tag(char *tagname) 937{ 938 return (tagname2tag(&pf_tags, tagname)); 939} 940 941void 942pf_tag2tagname(u_int16_t tagid, char *p) 943{ 944 return (tag2tagname(&pf_tags, tagid, p)); 945} 946 947void 948pf_tag_ref(u_int16_t tag) 949{ 950 struct pf_tagname *t; 951 952 TAILQ_FOREACH(t, &pf_tags, entries) 953 if (t->tag == tag) 954 break; 955 if (t != NULL) 956 t->ref++; 957} 958 959void 960pf_tag_unref(u_int16_t tag) 961{ 962 return (tag_unref(&pf_tags, tag)); 963} 964 965int 966pf_rtlabel_add(struct pf_addr_wrap *a) 967{ 968#ifdef __FreeBSD__ 969 /* 
/*
 * Start an altq transaction: flush whatever is left on the inactive
 * altq list (removing attached disciplines, releasing qids), then
 * hand the caller a fresh ticket and mark the inactive list open.
 * Returns 0 on success or the error from altq_remove().
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		/* empty qname marks an interface entry, not a queue */
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
/*
 * Commit an altq transaction: swap the active and inactive altq
 * lists, attach (and, if altq is running, enable) the disciplines of
 * the new active list, then purge the old one.  Requires the ticket
 * from pf_begin_altq(); returns EBUSY on a stale ticket.  The first
 * error from the purge phase is preserved and returned.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		/* empty qname marks an interface entry, not a queue */
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			/* keep the first error but finish the teardown */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1133 struct tb_profile tb; 1134 int s, error = 0; 1135 1136 if ((ifp = ifunit(altq->ifname)) == NULL) 1137 return (EINVAL); 1138 1139 if (ifp->if_snd.altq_type != ALTQT_NONE) 1140 error = altq_enable(&ifp->if_snd); 1141 1142 /* set tokenbucket regulator */ 1143 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 1144 tb.rate = altq->ifbandwidth; 1145 tb.depth = altq->tbrsize; 1146 s = splimp(); 1147#ifdef __FreeBSD__ 1148 PF_UNLOCK(); 1149#endif 1150 error = tbr_set(&ifp->if_snd, &tb); 1151#ifdef __FreeBSD__ 1152 PF_LOCK(); 1153#endif 1154 splx(s); 1155 } 1156 1157 return (error); 1158} 1159 1160int 1161pf_disable_altq(struct pf_altq *altq) 1162{ 1163 struct ifnet *ifp; 1164 struct tb_profile tb; 1165 int s, error; 1166 1167 if ((ifp = ifunit(altq->ifname)) == NULL) 1168 return (EINVAL); 1169 1170 /* 1171 * when the discipline is no longer referenced, it was overridden 1172 * by a new one. if so, just return. 1173 */ 1174 if (altq->altq_disc != ifp->if_snd.altq_disc) 1175 return (0); 1176 1177 error = altq_disable(&ifp->if_snd); 1178 1179 if (error == 0) { 1180 /* clear tokenbucket regulator */ 1181 tb.rate = 0; 1182 s = splimp(); 1183#ifdef __FreeBSD__ 1184 PF_UNLOCK(); 1185#endif 1186 error = tbr_set(&ifp->if_snd, &tb); 1187#ifdef __FreeBSD__ 1188 PF_LOCK(); 1189#endif 1190 splx(s); 1191 } 1192 1193 return (error); 1194} 1195#endif /* ALTQ */ 1196 1197int 1198pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1199{ 1200 struct pf_ruleset *rs; 1201 struct pf_rule *rule; 1202 1203 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1204 return (EINVAL); 1205 rs = pf_find_or_create_ruleset(anchor); 1206 if (rs == NULL) 1207 return (EINVAL); 1208 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) 1209 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 1210 *ticket = ++rs->rules[rs_num].inactive.ticket; 1211 rs->rules[rs_num].inactive.open = 1; 1212 return (0); 1213} 1214 1215int 1216pf_rollback_rules(u_int32_t ticket, int rs_num, 
    char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	/* nothing staged under this ticket: rollback is a no-op */
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Commit a ruleset transaction: swap the staged (inactive) rule list
 * into place as the active list for the given ruleset, under
 * splsoftnet() so the swap is not observed half-done, then free the
 * previously active rules.  Returns EBUSY unless the caller's ticket
 * matches an open inactive list, EINVAL for a bad ruleset number.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list.
	 */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * pfioctl: ioctl entry point for /dev/pf.  The front matter below
 * enforces two access policies before dispatching on the command:
 * at elevated securelevel only read-only and dummy operations are
 * allowed (EPERM otherwise), and without FWRITE on the descriptor
 * only read-only and dummy operations are allowed (EACCES otherwise).
 */
#ifdef __FreeBSD__
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
#ifndef __FreeBSD__
	int			 s;
#endif
	int			 error = 0;

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 2))
#else
	if (securelevel > 1)
#endif
		switch (cmd) {
		/* read-only commands: always permitted */
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCICLRISTATS:
#ifdef __FreeBSD__
		case DIOCGIFSPEED:
#endif
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		/* table ops: permitted only as dummy (no-op) operations */
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return
(EPERM); 1338 default: 1339 return (EPERM); 1340 } 1341 1342 if (!(flags & FWRITE)) 1343 switch (cmd) { 1344 case DIOCGETRULES: 1345 case DIOCGETRULE: 1346 case DIOCGETADDRS: 1347 case DIOCGETADDR: 1348 case DIOCGETSTATE: 1349 case DIOCGETSTATUS: 1350 case DIOCGETSTATES: 1351 case DIOCGETTIMEOUT: 1352 case DIOCGETLIMIT: 1353 case DIOCGETALTQS: 1354 case DIOCGETALTQ: 1355 case DIOCGETQSTATS: 1356 case DIOCGETRULESETS: 1357 case DIOCGETRULESET: 1358 case DIOCRGETTABLES: 1359 case DIOCRGETTSTATS: 1360 case DIOCRGETADDRS: 1361 case DIOCRGETASTATS: 1362 case DIOCRTSTADDRS: 1363 case DIOCOSFPGET: 1364 case DIOCGETSRCNODES: 1365 case DIOCIGETIFACES: 1366#ifdef __FreeBSD__ 1367 case DIOCGIFSPEED: 1368#endif 1369 break; 1370 case DIOCRCLRTABLES: 1371 case DIOCRADDTABLES: 1372 case DIOCRDELTABLES: 1373 case DIOCRCLRTSTATS: 1374 case DIOCRCLRADDRS: 1375 case DIOCRADDADDRS: 1376 case DIOCRDELADDRS: 1377 case DIOCRSETADDRS: 1378 case DIOCRSETTFLAGS: 1379 if (((struct pfioc_table *)addr)->pfrio_flags & 1380 PFR_FLAG_DUMMY) 1381 break; /* dummy operation ok */ 1382 return (EACCES); 1383 default: 1384 return (EACCES); 1385 } 1386 1387#ifdef __FreeBSD__ 1388 PF_LOCK(); 1389#else 1390 s = splsoftnet(); 1391#endif 1392 switch (cmd) { 1393 1394 case DIOCSTART: 1395 if (pf_status.running) 1396 error = EEXIST; 1397 else { 1398#ifdef __FreeBSD__ 1399 PF_UNLOCK(); 1400 error = hook_pf(); 1401 PF_LOCK(); 1402 if (error) { 1403 DPFPRINTF(PF_DEBUG_MISC, 1404 ("pf: pfil registeration fail\n")); 1405 break; 1406 } 1407#endif 1408 pf_status.running = 1; 1409 pf_status.since = time_second; 1410 if (pf_status.stateid == 0) { 1411 pf_status.stateid = time_second; 1412 pf_status.stateid = pf_status.stateid << 32; 1413 } 1414 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1415 } 1416 break; 1417 1418 case DIOCSTOP: 1419 if (!pf_status.running) 1420 error = ENOENT; 1421 else { 1422 pf_status.running = 0; 1423#ifdef __FreeBSD__ 1424 PF_UNLOCK(); 1425 error = dehook_pf(); 1426 PF_LOCK(); 1427 if (error) 
{ 1428 pf_status.running = 1; 1429 DPFPRINTF(PF_DEBUG_MISC, 1430 ("pf: pfil unregisteration failed\n")); 1431 } 1432#endif 1433 pf_status.since = time_second; 1434 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1435 } 1436 break; 1437 1438 case DIOCADDRULE: { 1439 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1440 struct pf_ruleset *ruleset; 1441 struct pf_rule *rule, *tail; 1442 struct pf_pooladdr *pa; 1443 int rs_num; 1444 1445 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1446 ruleset = pf_find_ruleset(pr->anchor); 1447 if (ruleset == NULL) { 1448 error = EINVAL; 1449 break; 1450 } 1451 rs_num = pf_get_ruleset_number(pr->rule.action); 1452 if (rs_num >= PF_RULESET_MAX) { 1453 error = EINVAL; 1454 break; 1455 } 1456 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1457 error = EINVAL; 1458 break; 1459 } 1460 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1461 printf("ticket: %d != [%d]%d\n", pr->ticket, 1462 rs_num, ruleset->rules[rs_num].inactive.ticket); 1463 error = EBUSY; 1464 break; 1465 } 1466 if (pr->pool_ticket != ticket_pabuf) { 1467 printf("pool_ticket: %d != %d\n", pr->pool_ticket, 1468 ticket_pabuf); 1469 error = EBUSY; 1470 break; 1471 } 1472 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1473 if (rule == NULL) { 1474 error = ENOMEM; 1475 break; 1476 } 1477 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1478 rule->anchor = NULL; 1479 rule->kif = NULL; 1480 TAILQ_INIT(&rule->rpool.list); 1481 /* initialize refcounting */ 1482 rule->states = 0; 1483 rule->src_nodes = 0; 1484 rule->entries.tqe_prev = NULL; 1485#ifndef INET 1486 if (rule->af == AF_INET) { 1487 pool_put(&pf_rule_pl, rule); 1488 error = EAFNOSUPPORT; 1489 break; 1490 } 1491#endif /* INET */ 1492#ifndef INET6 1493 if (rule->af == AF_INET6) { 1494 pool_put(&pf_rule_pl, rule); 1495 error = EAFNOSUPPORT; 1496 break; 1497 } 1498#endif /* INET6 */ 1499 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1500 pf_rulequeue); 1501 if (tail) 1502 rule->nr = tail->nr + 1; 1503 else 1504 
rule->nr = 0; 1505 if (rule->ifname[0]) { 1506 rule->kif = pfi_attach_rule(rule->ifname); 1507 if (rule->kif == NULL) { 1508 pool_put(&pf_rule_pl, rule); 1509 error = EINVAL; 1510 break; 1511 } 1512 } 1513 1514#ifdef ALTQ 1515 /* set queue IDs */ 1516 if (rule->qname[0] != 0) { 1517 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1518 error = EBUSY; 1519 else if (rule->pqname[0] != 0) { 1520 if ((rule->pqid = 1521 pf_qname2qid(rule->pqname)) == 0) 1522 error = EBUSY; 1523 } else 1524 rule->pqid = rule->qid; 1525 } 1526#endif 1527 if (rule->tagname[0]) 1528 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1529 error = EBUSY; 1530 if (rule->match_tagname[0]) 1531 if ((rule->match_tag = 1532 pf_tagname2tag(rule->match_tagname)) == 0) 1533 error = EBUSY; 1534 if (rule->rt && !rule->direction) 1535 error = EINVAL; 1536 if (pf_rtlabel_add(&rule->src.addr) || 1537 pf_rtlabel_add(&rule->dst.addr)) 1538 error = EBUSY; 1539 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1540 error = EINVAL; 1541 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1542 error = EINVAL; 1543 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1544 error = EINVAL; 1545 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1546 error = EINVAL; 1547 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1548 error = EINVAL; 1549 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1550 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1551 error = EINVAL; 1552 1553 if (rule->overload_tblname[0]) { 1554 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1555 rule->overload_tblname)) == NULL) 1556 error = EINVAL; 1557 else 1558 rule->overload_tbl->pfrkt_flags |= 1559 PFR_TFLAG_ACTIVE; 1560 } 1561 1562 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1563 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1564 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1565 (rule->rt > PF_FASTROUTE)) && 1566 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1567 error = EINVAL; 1568 1569 if (error) { 1570 pf_rm_rule(NULL, rule); 
1571 break; 1572 } 1573 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1574 rule->evaluations = rule->packets = rule->bytes = 0; 1575 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1576 rule, entries); 1577 break; 1578 } 1579 1580 case DIOCGETRULES: { 1581 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1582 struct pf_ruleset *ruleset; 1583 struct pf_rule *tail; 1584 int rs_num; 1585 1586 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1587 ruleset = pf_find_ruleset(pr->anchor); 1588 if (ruleset == NULL) { 1589 error = EINVAL; 1590 break; 1591 } 1592 rs_num = pf_get_ruleset_number(pr->rule.action); 1593 if (rs_num >= PF_RULESET_MAX) { 1594 error = EINVAL; 1595 break; 1596 } 1597 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1598 pf_rulequeue); 1599 if (tail) 1600 pr->nr = tail->nr + 1; 1601 else 1602 pr->nr = 0; 1603 pr->ticket = ruleset->rules[rs_num].active.ticket; 1604 break; 1605 } 1606 1607 case DIOCGETRULE: { 1608 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1609 struct pf_ruleset *ruleset; 1610 struct pf_rule *rule; 1611 int rs_num, i; 1612 1613 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1614 ruleset = pf_find_ruleset(pr->anchor); 1615 if (ruleset == NULL) { 1616 error = EINVAL; 1617 break; 1618 } 1619 rs_num = pf_get_ruleset_number(pr->rule.action); 1620 if (rs_num >= PF_RULESET_MAX) { 1621 error = EINVAL; 1622 break; 1623 } 1624 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1625 error = EBUSY; 1626 break; 1627 } 1628 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1629 while ((rule != NULL) && (rule->nr != pr->nr)) 1630 rule = TAILQ_NEXT(rule, entries); 1631 if (rule == NULL) { 1632 error = EBUSY; 1633 break; 1634 } 1635 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1636 if (pf_anchor_copyout(ruleset, rule, pr)) { 1637 error = EBUSY; 1638 break; 1639 } 1640 pfi_dynaddr_copyout(&pr->rule.src.addr); 1641 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1642 pf_tbladdr_copyout(&pr->rule.src.addr); 1643 
pf_tbladdr_copyout(&pr->rule.dst.addr); 1644 pf_rtlabel_copyout(&pr->rule.src.addr); 1645 pf_rtlabel_copyout(&pr->rule.dst.addr); 1646 for (i = 0; i < PF_SKIP_COUNT; ++i) 1647 if (rule->skip[i].ptr == NULL) 1648 pr->rule.skip[i].nr = -1; 1649 else 1650 pr->rule.skip[i].nr = 1651 rule->skip[i].ptr->nr; 1652 break; 1653 } 1654 1655 case DIOCCHANGERULE: { 1656 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1657 struct pf_ruleset *ruleset; 1658 struct pf_rule *oldrule = NULL, *newrule = NULL; 1659 u_int32_t nr = 0; 1660 int rs_num; 1661 1662 if (!(pcr->action == PF_CHANGE_REMOVE || 1663 pcr->action == PF_CHANGE_GET_TICKET) && 1664 pcr->pool_ticket != ticket_pabuf) { 1665 error = EBUSY; 1666 break; 1667 } 1668 1669 if (pcr->action < PF_CHANGE_ADD_HEAD || 1670 pcr->action > PF_CHANGE_GET_TICKET) { 1671 error = EINVAL; 1672 break; 1673 } 1674 ruleset = pf_find_ruleset(pcr->anchor); 1675 if (ruleset == NULL) { 1676 error = EINVAL; 1677 break; 1678 } 1679 rs_num = pf_get_ruleset_number(pcr->rule.action); 1680 if (rs_num >= PF_RULESET_MAX) { 1681 error = EINVAL; 1682 break; 1683 } 1684 1685 if (pcr->action == PF_CHANGE_GET_TICKET) { 1686 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1687 break; 1688 } else { 1689 if (pcr->ticket != 1690 ruleset->rules[rs_num].active.ticket) { 1691 error = EINVAL; 1692 break; 1693 } 1694 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1695 error = EINVAL; 1696 break; 1697 } 1698 } 1699 1700 if (pcr->action != PF_CHANGE_REMOVE) { 1701 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1702 if (newrule == NULL) { 1703 error = ENOMEM; 1704 break; 1705 } 1706 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1707 TAILQ_INIT(&newrule->rpool.list); 1708 /* initialize refcounting */ 1709 newrule->states = 0; 1710 newrule->entries.tqe_prev = NULL; 1711#ifndef INET 1712 if (newrule->af == AF_INET) { 1713 pool_put(&pf_rule_pl, newrule); 1714 error = EAFNOSUPPORT; 1715 break; 1716 } 1717#endif /* INET */ 1718#ifndef INET6 1719 if (newrule->af 
== AF_INET6) { 1720 pool_put(&pf_rule_pl, newrule); 1721 error = EAFNOSUPPORT; 1722 break; 1723 } 1724#endif /* INET6 */ 1725 if (newrule->ifname[0]) { 1726 newrule->kif = pfi_attach_rule(newrule->ifname); 1727 if (newrule->kif == NULL) { 1728 pool_put(&pf_rule_pl, newrule); 1729 error = EINVAL; 1730 break; 1731 } 1732 } else 1733 newrule->kif = NULL; 1734 1735#ifdef ALTQ 1736 /* set queue IDs */ 1737 if (newrule->qname[0] != 0) { 1738 if ((newrule->qid = 1739 pf_qname2qid(newrule->qname)) == 0) 1740 error = EBUSY; 1741 else if (newrule->pqname[0] != 0) { 1742 if ((newrule->pqid = 1743 pf_qname2qid(newrule->pqname)) == 0) 1744 error = EBUSY; 1745 } else 1746 newrule->pqid = newrule->qid; 1747 } 1748#endif /* ALTQ */ 1749 if (newrule->tagname[0]) 1750 if ((newrule->tag = 1751 pf_tagname2tag(newrule->tagname)) == 0) 1752 error = EBUSY; 1753 if (newrule->match_tagname[0]) 1754 if ((newrule->match_tag = pf_tagname2tag( 1755 newrule->match_tagname)) == 0) 1756 error = EBUSY; 1757 if (newrule->rt && !newrule->direction) 1758 error = EINVAL; 1759 if (pf_rtlabel_add(&newrule->src.addr) || 1760 pf_rtlabel_add(&newrule->dst.addr)) 1761 error = EBUSY; 1762 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1763 error = EINVAL; 1764 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1765 error = EINVAL; 1766 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1767 error = EINVAL; 1768 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1769 error = EINVAL; 1770 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1771 error = EINVAL; 1772 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1773 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1774 error = EINVAL; 1775 1776 if (newrule->overload_tblname[0]) { 1777 if ((newrule->overload_tbl = pfr_attach_table( 1778 ruleset, newrule->overload_tblname)) == 1779 NULL) 1780 error = EINVAL; 1781 else 1782 newrule->overload_tbl->pfrkt_flags |= 1783 PFR_TFLAG_ACTIVE; 1784 } 1785 1786 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1787 if 
(((((newrule->action == PF_NAT) || 1788 (newrule->action == PF_RDR) || 1789 (newrule->action == PF_BINAT) || 1790 (newrule->rt > PF_FASTROUTE)) && 1791 !pcr->anchor[0])) && 1792 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1793 error = EINVAL; 1794 1795 if (error) { 1796 pf_rm_rule(NULL, newrule); 1797 break; 1798 } 1799 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1800 newrule->evaluations = newrule->packets = 0; 1801 newrule->bytes = 0; 1802 } 1803 pf_empty_pool(&pf_pabuf); 1804 1805 if (pcr->action == PF_CHANGE_ADD_HEAD) 1806 oldrule = TAILQ_FIRST( 1807 ruleset->rules[rs_num].active.ptr); 1808 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1809 oldrule = TAILQ_LAST( 1810 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1811 else { 1812 oldrule = TAILQ_FIRST( 1813 ruleset->rules[rs_num].active.ptr); 1814 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1815 oldrule = TAILQ_NEXT(oldrule, entries); 1816 if (oldrule == NULL) { 1817 if (newrule != NULL) 1818 pf_rm_rule(NULL, newrule); 1819 error = EINVAL; 1820 break; 1821 } 1822 } 1823 1824 if (pcr->action == PF_CHANGE_REMOVE) 1825 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1826 else { 1827 if (oldrule == NULL) 1828 TAILQ_INSERT_TAIL( 1829 ruleset->rules[rs_num].active.ptr, 1830 newrule, entries); 1831 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1832 pcr->action == PF_CHANGE_ADD_BEFORE) 1833 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1834 else 1835 TAILQ_INSERT_AFTER( 1836 ruleset->rules[rs_num].active.ptr, 1837 oldrule, newrule, entries); 1838 } 1839 1840 nr = 0; 1841 TAILQ_FOREACH(oldrule, 1842 ruleset->rules[rs_num].active.ptr, entries) 1843 oldrule->nr = nr++; 1844 1845 ruleset->rules[rs_num].active.ticket++; 1846 1847 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1848 pf_remove_if_empty_ruleset(ruleset); 1849 1850 break; 1851 } 1852 1853 case DIOCCLRSTATES: { 1854 struct pf_state *state; 1855 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1856 int 
killed = 0; 1857 1858 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1859 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1860 state->u.s.kif->pfik_name)) { 1861 state->timeout = PFTM_PURGE; 1862#if NPFSYNC 1863 /* don't send out individual delete messages */ 1864 state->sync_flags = PFSTATE_NOSYNC; 1865#endif 1866 killed++; 1867 } 1868 } 1869 pf_purge_expired_states(); 1870 pf_status.states = 0; 1871 psk->psk_af = killed; 1872#if NPFSYNC 1873 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1874#endif 1875 break; 1876 } 1877 1878 case DIOCKILLSTATES: { 1879 struct pf_state *state; 1880 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1881 int killed = 0; 1882 1883 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1884 if ((!psk->psk_af || state->af == psk->psk_af) 1885 && (!psk->psk_proto || psk->psk_proto == 1886 state->proto) && 1887 PF_MATCHA(psk->psk_src.neg, 1888 &psk->psk_src.addr.v.a.addr, 1889 &psk->psk_src.addr.v.a.mask, 1890 &state->lan.addr, state->af) && 1891 PF_MATCHA(psk->psk_dst.neg, 1892 &psk->psk_dst.addr.v.a.addr, 1893 &psk->psk_dst.addr.v.a.mask, 1894 &state->ext.addr, state->af) && 1895 (psk->psk_src.port_op == 0 || 1896 pf_match_port(psk->psk_src.port_op, 1897 psk->psk_src.port[0], psk->psk_src.port[1], 1898 state->lan.port)) && 1899 (psk->psk_dst.port_op == 0 || 1900 pf_match_port(psk->psk_dst.port_op, 1901 psk->psk_dst.port[0], psk->psk_dst.port[1], 1902 state->ext.port)) && 1903 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1904 state->u.s.kif->pfik_name))) { 1905 state->timeout = PFTM_PURGE; 1906 killed++; 1907 } 1908 } 1909 pf_purge_expired_states(); 1910 psk->psk_af = killed; 1911 break; 1912 } 1913 1914 case DIOCADDSTATE: { 1915 struct pfioc_state *ps = (struct pfioc_state *)addr; 1916 struct pf_state *state; 1917 struct pfi_kif *kif; 1918 1919 if (ps->state.timeout >= PFTM_MAX && 1920 ps->state.timeout != PFTM_UNTIL_PACKET) { 1921 error = EINVAL; 1922 break; 1923 } 1924 state = pool_get(&pf_state_pl, 
PR_NOWAIT); 1925 if (state == NULL) { 1926 error = ENOMEM; 1927 break; 1928 } 1929 kif = pfi_lookup_create(ps->state.u.ifname); 1930 if (kif == NULL) { 1931 pool_put(&pf_state_pl, state); 1932 error = ENOENT; 1933 break; 1934 } 1935 bcopy(&ps->state, state, sizeof(struct pf_state)); 1936 bzero(&state->u, sizeof(state->u)); 1937 state->rule.ptr = &pf_default_rule; 1938 state->nat_rule.ptr = NULL; 1939 state->anchor.ptr = NULL; 1940 state->rt_kif = NULL; 1941 state->creation = time_second; 1942 state->pfsync_time = 0; 1943 state->packets[0] = state->packets[1] = 0; 1944 state->bytes[0] = state->bytes[1] = 0; 1945 1946 if (pf_insert_state(kif, state)) { 1947 pfi_maybe_destroy(kif); 1948 pool_put(&pf_state_pl, state); 1949 error = ENOMEM; 1950 } 1951 break; 1952 } 1953 1954 case DIOCGETSTATE: { 1955 struct pfioc_state *ps = (struct pfioc_state *)addr; 1956 struct pf_state *state; 1957 u_int32_t nr; 1958 1959 nr = 0; 1960 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1961 if (nr >= ps->nr) 1962 break; 1963 nr++; 1964 } 1965 if (state == NULL) { 1966 error = EBUSY; 1967 break; 1968 } 1969 bcopy(state, &ps->state, sizeof(struct pf_state)); 1970 ps->state.rule.nr = state->rule.ptr->nr; 1971 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 1972 -1 : state->nat_rule.ptr->nr; 1973 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
1974 -1 : state->anchor.ptr->nr; 1975 ps->state.expire = pf_state_expires(state); 1976 if (ps->state.expire > time_second) 1977 ps->state.expire -= time_second; 1978 else 1979 ps->state.expire = 0; 1980 break; 1981 } 1982 1983 case DIOCGETSTATES: { 1984 struct pfioc_states *ps = (struct pfioc_states *)addr; 1985 struct pf_state *state; 1986 struct pf_state *p, pstore; 1987 struct pfi_kif *kif; 1988 u_int32_t nr = 0; 1989 int space = ps->ps_len; 1990 1991 if (space == 0) { 1992 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1993 nr += kif->pfik_states; 1994 ps->ps_len = sizeof(struct pf_state) * nr; 1995 break; 1996 } 1997 1998 p = ps->ps_states; 1999 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 2000 RB_FOREACH(state, pf_state_tree_ext_gwy, 2001 &kif->pfik_ext_gwy) { 2002 int secs = time_second; 2003 2004 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2005 break; 2006 2007 bcopy(state, &pstore, sizeof(pstore)); 2008 strlcpy(pstore.u.ifname, kif->pfik_name, 2009 sizeof(pstore.u.ifname)); 2010 pstore.rule.nr = state->rule.ptr->nr; 2011 pstore.nat_rule.nr = (state->nat_rule.ptr == 2012 NULL) ? -1 : state->nat_rule.ptr->nr; 2013 pstore.anchor.nr = (state->anchor.ptr == 2014 NULL) ? 
-1 : state->anchor.ptr->nr; 2015 pstore.creation = secs - pstore.creation; 2016 pstore.expire = pf_state_expires(state); 2017 if (pstore.expire > secs) 2018 pstore.expire -= secs; 2019 else 2020 pstore.expire = 0; 2021#ifdef __FreeBSD__ 2022 PF_COPYOUT(&pstore, p, sizeof(*p), error); 2023#else 2024 error = copyout(&pstore, p, sizeof(*p)); 2025#endif 2026 if (error) 2027 goto fail; 2028 p++; 2029 nr++; 2030 } 2031 ps->ps_len = sizeof(struct pf_state) * nr; 2032 break; 2033 } 2034 2035 case DIOCGETSTATUS: { 2036 struct pf_status *s = (struct pf_status *)addr; 2037 bcopy(&pf_status, s, sizeof(struct pf_status)); 2038 pfi_fill_oldstatus(s); 2039 break; 2040 } 2041 2042 case DIOCSETSTATUSIF: { 2043 struct pfioc_if *pi = (struct pfioc_if *)addr; 2044 2045 if (pi->ifname[0] == 0) { 2046 bzero(pf_status.ifname, IFNAMSIZ); 2047 break; 2048 } 2049 if (ifunit(pi->ifname) == NULL) { 2050 error = EINVAL; 2051 break; 2052 } 2053 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2054 break; 2055 } 2056 2057 case DIOCCLRSTATUS: { 2058 bzero(pf_status.counters, sizeof(pf_status.counters)); 2059 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2060 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2061 if (*pf_status.ifname) 2062 pfi_clr_istats(pf_status.ifname, NULL, 2063 PFI_FLAG_INSTANCE); 2064 break; 2065 } 2066 2067 case DIOCNATLOOK: { 2068 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2069 struct pf_state *state; 2070 struct pf_state key; 2071 int m = 0, direction = pnl->direction; 2072 2073 key.af = pnl->af; 2074 key.proto = pnl->proto; 2075 2076 if (!pnl->proto || 2077 PF_AZERO(&pnl->saddr, pnl->af) || 2078 PF_AZERO(&pnl->daddr, pnl->af) || 2079 !pnl->dport || !pnl->sport) 2080 error = EINVAL; 2081 else { 2082 /* 2083 * userland gives us source and dest of connection, 2084 * reverse the lookup so we ask for what happens with 2085 * the return traffic, enabling us to find it in the 2086 * state tree. 
2087 */ 2088 if (direction == PF_IN) { 2089 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2090 key.ext.port = pnl->dport; 2091 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2092 key.gwy.port = pnl->sport; 2093 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2094 } else { 2095 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2096 key.lan.port = pnl->dport; 2097 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2098 key.ext.port = pnl->sport; 2099 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2100 } 2101 if (m > 1) 2102 error = E2BIG; /* more than one state */ 2103 else if (state != NULL) { 2104 if (direction == PF_IN) { 2105 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 2106 state->af); 2107 pnl->rsport = state->lan.port; 2108 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2109 pnl->af); 2110 pnl->rdport = pnl->dport; 2111 } else { 2112 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 2113 state->af); 2114 pnl->rdport = state->gwy.port; 2115 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2116 pnl->af); 2117 pnl->rsport = pnl->sport; 2118 } 2119 } else 2120 error = ENOENT; 2121 } 2122 break; 2123 } 2124 2125 case DIOCSETTIMEOUT: { 2126 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2127 int old; 2128 2129 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2130 pt->seconds < 0) { 2131 error = EINVAL; 2132 goto fail; 2133 } 2134 old = pf_default_rule.timeout[pt->timeout]; 2135 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2136 pt->seconds = old; 2137 break; 2138 } 2139 2140 case DIOCGETTIMEOUT: { 2141 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2142 2143 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2144 error = EINVAL; 2145 goto fail; 2146 } 2147 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2148 break; 2149 } 2150 2151 case DIOCGETLIMIT: { 2152 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2153 2154 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2155 error = EINVAL; 2156 goto fail; 2157 } 2158 pl->limit = pf_pool_limits[pl->index].limit; 2159 break; 2160 } 2161 2162 case 
DIOCSETLIMIT: { 2163 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2164 int old_limit; 2165 2166 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2167 pf_pool_limits[pl->index].pp == NULL) { 2168 error = EINVAL; 2169 goto fail; 2170 } 2171#ifdef __FreeBSD__ 2172 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit); 2173#else 2174 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2175 pl->limit, NULL, 0) != 0) { 2176 error = EBUSY; 2177 goto fail; 2178 } 2179#endif 2180 old_limit = pf_pool_limits[pl->index].limit; 2181 pf_pool_limits[pl->index].limit = pl->limit; 2182 pl->limit = old_limit; 2183 break; 2184 } 2185 2186 case DIOCSETDEBUG: { 2187 u_int32_t *level = (u_int32_t *)addr; 2188 2189 pf_status.debug = *level; 2190 break; 2191 } 2192 2193 case DIOCCLRRULECTRS: { 2194 struct pf_ruleset *ruleset = &pf_main_ruleset; 2195 struct pf_rule *rule; 2196 2197 TAILQ_FOREACH(rule, 2198 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) 2199 rule->evaluations = rule->packets = 2200 rule->bytes = 0; 2201 break; 2202 } 2203 2204#ifdef __FreeBSD__ 2205 case DIOCGIFSPEED: { 2206 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2207 struct pf_ifspeed ps; 2208 struct ifnet *ifp; 2209 2210 if (psp->ifname[0] != 0) { 2211 /* Can we completely trust user-land? 
*/ 2212 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2213 ifp = ifunit(ps.ifname); 2214 if (ifp != NULL) 2215 psp->baudrate = ifp->if_baudrate; 2216 else 2217 error = EINVAL; 2218 } else 2219 error = EINVAL; 2220 break; 2221 } 2222#endif /* __FreeBSD__ */ 2223 2224#ifdef ALTQ 2225 case DIOCSTARTALTQ: { 2226 struct pf_altq *altq; 2227 2228 /* enable all altq interfaces on active list */ 2229 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2230 if (altq->qname[0] == 0) { 2231 error = pf_enable_altq(altq); 2232 if (error != 0) 2233 break; 2234 } 2235 } 2236 if (error == 0) 2237 pf_altq_running = 1; 2238 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2239 break; 2240 } 2241 2242 case DIOCSTOPALTQ: { 2243 struct pf_altq *altq; 2244 2245 /* disable all altq interfaces on active list */ 2246 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2247 if (altq->qname[0] == 0) { 2248 error = pf_disable_altq(altq); 2249 if (error != 0) 2250 break; 2251 } 2252 } 2253 if (error == 0) 2254 pf_altq_running = 0; 2255 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2256 break; 2257 } 2258 2259 case DIOCADDALTQ: { 2260 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2261 struct pf_altq *altq, *a; 2262 2263 if (pa->ticket != ticket_altqs_inactive) { 2264 error = EBUSY; 2265 break; 2266 } 2267 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2268 if (altq == NULL) { 2269 error = ENOMEM; 2270 break; 2271 } 2272 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2273 2274 /* 2275 * if this is for a queue, find the discipline and 2276 * copy the necessary fields 2277 */ 2278 if (altq->qname[0] != 0) { 2279 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2280 error = EBUSY; 2281 pool_put(&pf_altq_pl, altq); 2282 break; 2283 } 2284 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2285 if (strncmp(a->ifname, altq->ifname, 2286 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2287 altq->altq_disc = a->altq_disc; 2288 break; 2289 } 2290 } 2291 } 2292 2293#ifdef __FreeBSD__ 2294 PF_UNLOCK(); 2295#endif 2296 error = 
altq_add(altq); 2297#ifdef __FreeBSD__ 2298 PF_LOCK(); 2299#endif 2300 if (error) { 2301 pool_put(&pf_altq_pl, altq); 2302 break; 2303 } 2304 2305 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2306 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2307 break; 2308 } 2309 2310 case DIOCGETALTQS: { 2311 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2312 struct pf_altq *altq; 2313 2314 pa->nr = 0; 2315 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2316 pa->nr++; 2317 pa->ticket = ticket_altqs_active; 2318 break; 2319 } 2320 2321 case DIOCGETALTQ: { 2322 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2323 struct pf_altq *altq; 2324 u_int32_t nr; 2325 2326 if (pa->ticket != ticket_altqs_active) { 2327 error = EBUSY; 2328 break; 2329 } 2330 nr = 0; 2331 altq = TAILQ_FIRST(pf_altqs_active); 2332 while ((altq != NULL) && (nr < pa->nr)) { 2333 altq = TAILQ_NEXT(altq, entries); 2334 nr++; 2335 } 2336 if (altq == NULL) { 2337 error = EBUSY; 2338 break; 2339 } 2340 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2341 break; 2342 } 2343 2344 case DIOCCHANGEALTQ: 2345 /* CHANGEALTQ not supported yet! 
*/ 2346 error = ENODEV; 2347 break; 2348 2349 case DIOCGETQSTATS: { 2350 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2351 struct pf_altq *altq; 2352 u_int32_t nr; 2353 int nbytes; 2354 2355 if (pq->ticket != ticket_altqs_active) { 2356 error = EBUSY; 2357 break; 2358 } 2359 nbytes = pq->nbytes; 2360 nr = 0; 2361 altq = TAILQ_FIRST(pf_altqs_active); 2362 while ((altq != NULL) && (nr < pq->nr)) { 2363 altq = TAILQ_NEXT(altq, entries); 2364 nr++; 2365 } 2366 if (altq == NULL) { 2367 error = EBUSY; 2368 break; 2369 } 2370#ifdef __FreeBSD__ 2371 PF_UNLOCK(); 2372#endif 2373 error = altq_getqstats(altq, pq->buf, &nbytes); 2374#ifdef __FreeBSD__ 2375 PF_LOCK(); 2376#endif 2377 if (error == 0) { 2378 pq->scheduler = altq->scheduler; 2379 pq->nbytes = nbytes; 2380 } 2381 break; 2382 } 2383#endif /* ALTQ */ 2384 2385 case DIOCBEGINADDRS: { 2386 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2387 2388 pf_empty_pool(&pf_pabuf); 2389 pp->ticket = ++ticket_pabuf; 2390 break; 2391 } 2392 2393 case DIOCADDADDR: { 2394 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2395 2396 if (pp->ticket != ticket_pabuf) { 2397 error = EBUSY; 2398 break; 2399 } 2400#ifndef INET 2401 if (pp->af == AF_INET) { 2402 error = EAFNOSUPPORT; 2403 break; 2404 } 2405#endif /* INET */ 2406#ifndef INET6 2407 if (pp->af == AF_INET6) { 2408 error = EAFNOSUPPORT; 2409 break; 2410 } 2411#endif /* INET6 */ 2412 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2413 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2414 pp->addr.addr.type != PF_ADDR_TABLE) { 2415 error = EINVAL; 2416 break; 2417 } 2418 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2419 if (pa == NULL) { 2420 error = ENOMEM; 2421 break; 2422 } 2423 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2424 if (pa->ifname[0]) { 2425 pa->kif = pfi_attach_rule(pa->ifname); 2426 if (pa->kif == NULL) { 2427 pool_put(&pf_pooladdr_pl, pa); 2428 error = EINVAL; 2429 break; 2430 } 2431 } 2432 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2433 
pfi_dynaddr_remove(&pa->addr); 2434 pfi_detach_rule(pa->kif); 2435 pool_put(&pf_pooladdr_pl, pa); 2436 error = EINVAL; 2437 break; 2438 } 2439 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2440 break; 2441 } 2442 2443 case DIOCGETADDRS: { 2444 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2445 2446 pp->nr = 0; 2447 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2448 pp->r_num, 0, 1, 0); 2449 if (pool == NULL) { 2450 error = EBUSY; 2451 break; 2452 } 2453 TAILQ_FOREACH(pa, &pool->list, entries) 2454 pp->nr++; 2455 break; 2456 } 2457 2458 case DIOCGETADDR: { 2459 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2460 u_int32_t nr = 0; 2461 2462 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2463 pp->r_num, 0, 1, 1); 2464 if (pool == NULL) { 2465 error = EBUSY; 2466 break; 2467 } 2468 pa = TAILQ_FIRST(&pool->list); 2469 while ((pa != NULL) && (nr < pp->nr)) { 2470 pa = TAILQ_NEXT(pa, entries); 2471 nr++; 2472 } 2473 if (pa == NULL) { 2474 error = EBUSY; 2475 break; 2476 } 2477 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2478 pfi_dynaddr_copyout(&pp->addr.addr); 2479 pf_tbladdr_copyout(&pp->addr.addr); 2480 pf_rtlabel_copyout(&pp->addr.addr); 2481 break; 2482 } 2483 2484 case DIOCCHANGEADDR: { 2485 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2486 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2487 struct pf_ruleset *ruleset; 2488 2489 if (pca->action < PF_CHANGE_ADD_HEAD || 2490 pca->action > PF_CHANGE_REMOVE) { 2491 error = EINVAL; 2492 break; 2493 } 2494 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2495 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2496 pca->addr.addr.type != PF_ADDR_TABLE) { 2497 error = EINVAL; 2498 break; 2499 } 2500 2501 ruleset = pf_find_ruleset(pca->anchor); 2502 if (ruleset == NULL) { 2503 error = EBUSY; 2504 break; 2505 } 2506 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2507 pca->r_num, pca->r_last, 1, 1); 2508 if (pool == NULL) { 2509 error = EBUSY; 2510 break; 2511 
} 2512 if (pca->action != PF_CHANGE_REMOVE) { 2513 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2514 if (newpa == NULL) { 2515 error = ENOMEM; 2516 break; 2517 } 2518 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2519#ifndef INET 2520 if (pca->af == AF_INET) { 2521 pool_put(&pf_pooladdr_pl, newpa); 2522 error = EAFNOSUPPORT; 2523 break; 2524 } 2525#endif /* INET */ 2526#ifndef INET6 2527 if (pca->af == AF_INET6) { 2528 pool_put(&pf_pooladdr_pl, newpa); 2529 error = EAFNOSUPPORT; 2530 break; 2531 } 2532#endif /* INET6 */ 2533 if (newpa->ifname[0]) { 2534 newpa->kif = pfi_attach_rule(newpa->ifname); 2535 if (newpa->kif == NULL) { 2536 pool_put(&pf_pooladdr_pl, newpa); 2537 error = EINVAL; 2538 break; 2539 } 2540 } else 2541 newpa->kif = NULL; 2542 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2543 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2544 pfi_dynaddr_remove(&newpa->addr); 2545 pfi_detach_rule(newpa->kif); 2546 pool_put(&pf_pooladdr_pl, newpa); 2547 error = EINVAL; 2548 break; 2549 } 2550 } 2551 2552 if (pca->action == PF_CHANGE_ADD_HEAD) 2553 oldpa = TAILQ_FIRST(&pool->list); 2554 else if (pca->action == PF_CHANGE_ADD_TAIL) 2555 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2556 else { 2557 int i = 0; 2558 2559 oldpa = TAILQ_FIRST(&pool->list); 2560 while ((oldpa != NULL) && (i < pca->nr)) { 2561 oldpa = TAILQ_NEXT(oldpa, entries); 2562 i++; 2563 } 2564 if (oldpa == NULL) { 2565 error = EINVAL; 2566 break; 2567 } 2568 } 2569 2570 if (pca->action == PF_CHANGE_REMOVE) { 2571 TAILQ_REMOVE(&pool->list, oldpa, entries); 2572 pfi_dynaddr_remove(&oldpa->addr); 2573 pf_tbladdr_remove(&oldpa->addr); 2574 pfi_detach_rule(oldpa->kif); 2575 pool_put(&pf_pooladdr_pl, oldpa); 2576 } else { 2577 if (oldpa == NULL) 2578 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2579 else if (pca->action == PF_CHANGE_ADD_HEAD || 2580 pca->action == PF_CHANGE_ADD_BEFORE) 2581 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2582 else 2583 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2584 
newpa, entries); 2585 } 2586 2587 pool->cur = TAILQ_FIRST(&pool->list); 2588 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2589 pca->af); 2590 break; 2591 } 2592 2593 case DIOCGETRULESETS: { 2594 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2595 struct pf_ruleset *ruleset; 2596 struct pf_anchor *anchor; 2597 2598 pr->path[sizeof(pr->path) - 1] = 0; 2599 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2600 error = EINVAL; 2601 break; 2602 } 2603 pr->nr = 0; 2604 if (ruleset->anchor == NULL) { 2605 /* XXX kludge for pf_main_ruleset */ 2606 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2607 if (anchor->parent == NULL) 2608 pr->nr++; 2609 } else { 2610 RB_FOREACH(anchor, pf_anchor_node, 2611 &ruleset->anchor->children) 2612 pr->nr++; 2613 } 2614 break; 2615 } 2616 2617 case DIOCGETRULESET: { 2618 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2619 struct pf_ruleset *ruleset; 2620 struct pf_anchor *anchor; 2621 u_int32_t nr = 0; 2622 2623 pr->path[sizeof(pr->path) - 1] = 0; 2624 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2625 error = EINVAL; 2626 break; 2627 } 2628 pr->name[0] = 0; 2629 if (ruleset->anchor == NULL) { 2630 /* XXX kludge for pf_main_ruleset */ 2631 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2632 if (anchor->parent == NULL && nr++ == pr->nr) { 2633 strlcpy(pr->name, anchor->name, 2634 sizeof(pr->name)); 2635 break; 2636 } 2637 } else { 2638 RB_FOREACH(anchor, pf_anchor_node, 2639 &ruleset->anchor->children) 2640 if (nr++ == pr->nr) { 2641 strlcpy(pr->name, anchor->name, 2642 sizeof(pr->name)); 2643 break; 2644 } 2645 } 2646 if (!pr->name[0]) 2647 error = EBUSY; 2648 break; 2649 } 2650 2651 case DIOCRCLRTABLES: { 2652 struct pfioc_table *io = (struct pfioc_table *)addr; 2653 2654 if (io->pfrio_esize != 0) { 2655 error = ENODEV; 2656 break; 2657 } 2658 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2659 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2660 break; 2661 } 2662 2663 case DIOCRADDTABLES: { 
2664 struct pfioc_table *io = (struct pfioc_table *)addr; 2665 2666 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2667 error = ENODEV; 2668 break; 2669 } 2670 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2671 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2672 break; 2673 } 2674 2675 case DIOCRDELTABLES: { 2676 struct pfioc_table *io = (struct pfioc_table *)addr; 2677 2678 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2679 error = ENODEV; 2680 break; 2681 } 2682 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2683 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2684 break; 2685 } 2686 2687 case DIOCRGETTABLES: { 2688 struct pfioc_table *io = (struct pfioc_table *)addr; 2689 2690 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2691 error = ENODEV; 2692 break; 2693 } 2694 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2695 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2696 break; 2697 } 2698 2699 case DIOCRGETTSTATS: { 2700 struct pfioc_table *io = (struct pfioc_table *)addr; 2701 2702 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2703 error = ENODEV; 2704 break; 2705 } 2706 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2707 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2708 break; 2709 } 2710 2711 case DIOCRCLRTSTATS: { 2712 struct pfioc_table *io = (struct pfioc_table *)addr; 2713 2714 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2715 error = ENODEV; 2716 break; 2717 } 2718 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2719 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2720 break; 2721 } 2722 2723 case DIOCRSETTFLAGS: { 2724 struct pfioc_table *io = (struct pfioc_table *)addr; 2725 2726 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2727 error = ENODEV; 2728 break; 2729 } 2730 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2731 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2732 &io->pfrio_ndel, io->pfrio_flags | 
PFR_FLAG_USERIOCTL); 2733 break; 2734 } 2735 2736 case DIOCRCLRADDRS: { 2737 struct pfioc_table *io = (struct pfioc_table *)addr; 2738 2739 if (io->pfrio_esize != 0) { 2740 error = ENODEV; 2741 break; 2742 } 2743 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2744 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2745 break; 2746 } 2747 2748 case DIOCRADDADDRS: { 2749 struct pfioc_table *io = (struct pfioc_table *)addr; 2750 2751 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2752 error = ENODEV; 2753 break; 2754 } 2755 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2756 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2757 PFR_FLAG_USERIOCTL); 2758 break; 2759 } 2760 2761 case DIOCRDELADDRS: { 2762 struct pfioc_table *io = (struct pfioc_table *)addr; 2763 2764 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2765 error = ENODEV; 2766 break; 2767 } 2768 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2769 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2770 PFR_FLAG_USERIOCTL); 2771 break; 2772 } 2773 2774 case DIOCRSETADDRS: { 2775 struct pfioc_table *io = (struct pfioc_table *)addr; 2776 2777 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2778 error = ENODEV; 2779 break; 2780 } 2781 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2782 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2783 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2784 PFR_FLAG_USERIOCTL); 2785 break; 2786 } 2787 2788 case DIOCRGETADDRS: { 2789 struct pfioc_table *io = (struct pfioc_table *)addr; 2790 2791 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2792 error = ENODEV; 2793 break; 2794 } 2795 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2796 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2797 break; 2798 } 2799 2800 case DIOCRGETASTATS: { 2801 struct pfioc_table *io = (struct pfioc_table *)addr; 2802 2803 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2804 error = ENODEV; 2805 break; 2806 } 2807 error = 
pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2808 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2809 break; 2810 } 2811 2812 case DIOCRCLRASTATS: { 2813 struct pfioc_table *io = (struct pfioc_table *)addr; 2814 2815 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2816 error = ENODEV; 2817 break; 2818 } 2819 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2820 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2821 PFR_FLAG_USERIOCTL); 2822 break; 2823 } 2824 2825 case DIOCRTSTADDRS: { 2826 struct pfioc_table *io = (struct pfioc_table *)addr; 2827 2828 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2829 error = ENODEV; 2830 break; 2831 } 2832 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2833 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2834 PFR_FLAG_USERIOCTL); 2835 break; 2836 } 2837 2838 case DIOCRINADEFINE: { 2839 struct pfioc_table *io = (struct pfioc_table *)addr; 2840 2841 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2842 error = ENODEV; 2843 break; 2844 } 2845 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2846 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2847 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2848 break; 2849 } 2850 2851 case DIOCOSFPADD: { 2852 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2853 error = pf_osfp_add(io); 2854 break; 2855 } 2856 2857 case DIOCOSFPGET: { 2858 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2859 error = pf_osfp_get(io); 2860 break; 2861 } 2862 2863 case DIOCXBEGIN: { 2864 struct pfioc_trans *io = (struct pfioc_trans *) 2865 addr; 2866 static struct pfioc_trans_e ioe; 2867 static struct pfr_table table; 2868 int i; 2869 2870 if (io->esize != sizeof(ioe)) { 2871 error = ENODEV; 2872 goto fail; 2873 } 2874 for (i = 0; i < io->size; i++) { 2875#ifdef __FreeBSD__ 2876 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2877 if (error) { 2878#else 2879 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2880#endif 2881 error 
= EFAULT; 2882 goto fail; 2883 } 2884 switch (ioe.rs_num) { 2885#ifdef ALTQ 2886 case PF_RULESET_ALTQ: 2887 if (ioe.anchor[0]) { 2888 error = EINVAL; 2889 goto fail; 2890 } 2891 if ((error = pf_begin_altq(&ioe.ticket))) 2892 goto fail; 2893 break; 2894#endif /* ALTQ */ 2895 case PF_RULESET_TABLE: 2896 bzero(&table, sizeof(table)); 2897 strlcpy(table.pfrt_anchor, ioe.anchor, 2898 sizeof(table.pfrt_anchor)); 2899 if ((error = pfr_ina_begin(&table, 2900 &ioe.ticket, NULL, 0))) 2901 goto fail; 2902 break; 2903 default: 2904 if ((error = pf_begin_rules(&ioe.ticket, 2905 ioe.rs_num, ioe.anchor))) 2906 goto fail; 2907 break; 2908 } 2909#ifdef __FreeBSD__ 2910 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]), 2911 error); 2912 if (error) { 2913#else 2914 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2915#endif 2916 error = EFAULT; 2917 goto fail; 2918 } 2919 } 2920 break; 2921 } 2922 2923 case DIOCXROLLBACK: { 2924 struct pfioc_trans *io = (struct pfioc_trans *) 2925 addr; 2926 static struct pfioc_trans_e ioe; 2927 static struct pfr_table table; 2928 int i; 2929 2930 if (io->esize != sizeof(ioe)) { 2931 error = ENODEV; 2932 goto fail; 2933 } 2934 for (i = 0; i < io->size; i++) { 2935#ifdef __FreeBSD__ 2936 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2937 if (error) { 2938#else 2939 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2940#endif 2941 error = EFAULT; 2942 goto fail; 2943 } 2944 switch (ioe.rs_num) { 2945#ifdef ALTQ 2946 case PF_RULESET_ALTQ: 2947 if (ioe.anchor[0]) { 2948 error = EINVAL; 2949 goto fail; 2950 } 2951 if ((error = pf_rollback_altq(ioe.ticket))) 2952 goto fail; /* really bad */ 2953 break; 2954#endif /* ALTQ */ 2955 case PF_RULESET_TABLE: 2956 bzero(&table, sizeof(table)); 2957 strlcpy(table.pfrt_anchor, ioe.anchor, 2958 sizeof(table.pfrt_anchor)); 2959 if ((error = pfr_ina_rollback(&table, 2960 ioe.ticket, NULL, 0))) 2961 goto fail; /* really bad */ 2962 break; 2963 default: 2964 if ((error = pf_rollback_rules(ioe.ticket, 2965 
ioe.rs_num, ioe.anchor))) 2966 goto fail; /* really bad */ 2967 break; 2968 } 2969 } 2970 break; 2971 } 2972 2973 case DIOCXCOMMIT: { 2974 struct pfioc_trans *io = (struct pfioc_trans *) 2975 addr; 2976 static struct pfioc_trans_e ioe; 2977 static struct pfr_table table; 2978 struct pf_ruleset *rs; 2979 int i; 2980 2981 if (io->esize != sizeof(ioe)) { 2982 error = ENODEV; 2983 goto fail; 2984 } 2985 /* first makes sure everything will succeed */ 2986 for (i = 0; i < io->size; i++) { 2987#ifdef __FreeBSD__ 2988 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 2989 if (error) { 2990#else 2991 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2992#endif 2993 error = EFAULT; 2994 goto fail; 2995 } 2996 switch (ioe.rs_num) { 2997#ifdef ALTQ 2998 case PF_RULESET_ALTQ: 2999 if (ioe.anchor[0]) { 3000 error = EINVAL; 3001 goto fail; 3002 } 3003 if (!altqs_inactive_open || ioe.ticket != 3004 ticket_altqs_inactive) { 3005 error = EBUSY; 3006 goto fail; 3007 } 3008 break; 3009#endif /* ALTQ */ 3010 case PF_RULESET_TABLE: 3011 rs = pf_find_ruleset(ioe.anchor); 3012 if (rs == NULL || !rs->topen || ioe.ticket != 3013 rs->tticket) { 3014 error = EBUSY; 3015 goto fail; 3016 } 3017 break; 3018 default: 3019 if (ioe.rs_num < 0 || ioe.rs_num >= 3020 PF_RULESET_MAX) { 3021 error = EINVAL; 3022 goto fail; 3023 } 3024 rs = pf_find_ruleset(ioe.anchor); 3025 if (rs == NULL || 3026 !rs->rules[ioe.rs_num].inactive.open || 3027 rs->rules[ioe.rs_num].inactive.ticket != 3028 ioe.ticket) { 3029 error = EBUSY; 3030 goto fail; 3031 } 3032 break; 3033 } 3034 } 3035 /* now do the commit - no errors should happen here */ 3036 for (i = 0; i < io->size; i++) { 3037#ifdef __FreeBSD__ 3038 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error); 3039 if (error) { 3040#else 3041 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 3042#endif 3043 error = EFAULT; 3044 goto fail; 3045 } 3046 switch (ioe.rs_num) { 3047#ifdef ALTQ 3048 case PF_RULESET_ALTQ: 3049 if ((error = pf_commit_altq(ioe.ticket))) 3050 goto fail; /* 
really bad */ 3051 break; 3052#endif /* ALTQ */ 3053 case PF_RULESET_TABLE: 3054 bzero(&table, sizeof(table)); 3055 strlcpy(table.pfrt_anchor, ioe.anchor, 3056 sizeof(table.pfrt_anchor)); 3057 if ((error = pfr_ina_commit(&table, ioe.ticket, 3058 NULL, NULL, 0))) 3059 goto fail; /* really bad */ 3060 break; 3061 default: 3062 if ((error = pf_commit_rules(ioe.ticket, 3063 ioe.rs_num, ioe.anchor))) 3064 goto fail; /* really bad */ 3065 break; 3066 } 3067 } 3068 break; 3069 } 3070 3071 case DIOCGETSRCNODES: { 3072 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3073 struct pf_src_node *n; 3074 struct pf_src_node *p, pstore; 3075 u_int32_t nr = 0; 3076 int space = psn->psn_len; 3077 3078 if (space == 0) { 3079 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3080 nr++; 3081 psn->psn_len = sizeof(struct pf_src_node) * nr; 3082 break; 3083 } 3084 3085 p = psn->psn_src_nodes; 3086 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3087 int secs = time_second, diff; 3088 3089 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3090 break; 3091 3092 bcopy(n, &pstore, sizeof(pstore)); 3093 if (n->rule.ptr != NULL) 3094 pstore.rule.nr = n->rule.ptr->nr; 3095 pstore.creation = secs - pstore.creation; 3096 if (pstore.expire > secs) 3097 pstore.expire -= secs; 3098 else 3099 pstore.expire = 0; 3100 3101 /* adjust the connection rate estimate */ 3102 diff = secs - n->conn_rate.last; 3103 if (diff >= n->conn_rate.seconds) 3104 pstore.conn_rate.count = 0; 3105 else 3106 pstore.conn_rate.count -= 3107 n->conn_rate.count * diff / 3108 n->conn_rate.seconds; 3109 3110#ifdef __FreeBSD__ 3111 PF_COPYOUT(&pstore, p, sizeof(*p), error); 3112#else 3113 error = copyout(&pstore, p, sizeof(*p)); 3114#endif 3115 if (error) 3116 goto fail; 3117 p++; 3118 nr++; 3119 } 3120 psn->psn_len = sizeof(struct pf_src_node) * nr; 3121 break; 3122 } 3123 3124 case DIOCCLRSRCNODES: { 3125 struct pf_src_node *n; 3126 struct pf_state *state; 3127 3128 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 
3129 state->src_node = NULL; 3130 state->nat_src_node = NULL; 3131 } 3132 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3133 n->expire = 1; 3134 n->states = 0; 3135 } 3136 pf_purge_expired_src_nodes(); 3137 pf_status.src_nodes = 0; 3138 break; 3139 } 3140 3141 case DIOCSETHOSTID: { 3142 u_int32_t *hostid = (u_int32_t *)addr; 3143 3144 if (*hostid == 0) 3145 pf_status.hostid = arc4random(); 3146 else 3147 pf_status.hostid = *hostid; 3148 break; 3149 } 3150 3151 case DIOCOSFPFLUSH: 3152 pf_osfp_flush(); 3153 break; 3154 3155 case DIOCIGETIFACES: { 3156 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3157 3158 if (io->pfiio_esize != sizeof(struct pfi_if)) { 3159 error = ENODEV; 3160 break; 3161 } 3162 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3163 &io->pfiio_size, io->pfiio_flags); 3164 break; 3165 } 3166 3167 case DIOCICLRISTATS: { 3168 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3169 3170 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero, 3171 io->pfiio_flags); 3172 break; 3173 } 3174 3175 case DIOCSETIFFLAG: { 3176 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3177 3178 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3179 break; 3180 } 3181 3182 case DIOCCLRIFFLAG: { 3183 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3184 3185 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3186 break; 3187 } 3188 3189 default: 3190 error = ENODEV; 3191 break; 3192 } 3193fail: 3194#ifdef __FreeBSD__ 3195 PF_UNLOCK(); 3196#else 3197 splx(s); 3198#endif 3199 return (error); 3200} 3201 3202#ifdef __FreeBSD__ 3203/* 3204 * XXX - Check for version missmatch!!! 
 */
/*
 * Expire and purge every entry in the state table.  Each state is
 * stamped PFTM_PURGE and pf_purge_expired_states() reclaims them all in
 * one pass; the global state counter is reset afterwards.  Called with
 * PF_LOCK held from shutdown_pf() (see pf_unload()).
 */
static void
pf_clear_states(void)
{
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
	pf_status.states = 0;
#if 0	/* NPFSYNC */
/*
 * XXX This is called on module unload, we do not want to sync that over?
 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}

/*
 * Flush all pf tables by issuing pfr_clr_tables() with a zeroed
 * pfioc_table request, mirroring the DIOCRCLRTABLES ioctl handler
 * above.  Returns the pfr_clr_tables() error code (0 on success).
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Drop all source-tracking nodes: detach them from every state entry,
 * mark each node expired with zero remaining states, then purge them.
 * Mirrors the DIOCCLRSRCNODES ioctl handler above.
 */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;
		n->states = 0;
	}
	pf_purge_expired_src_nodes();
	pf_status.src_nodes = 0;
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 * Each ruleset is emptied by opening a fresh inactive ruleset
 * transaction and committing it immediately with nothing added; tables,
 * ALTQ discipline, states and source nodes are then flushed in turn.
 * Called with PF_LOCK held from pf_unload().
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	/* Stop the expiry timer; everything is flushed explicitly below. */
	callout_stop(&pf_expire_to);

	pf_status.running = 0;
	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for AF_INET.  Swaps ip_len/ip_off to the network
 * byte order pf_test() expects, runs the packet through pf, and frees
 * the mbuf when pf rejects it (*m is set to NULL so the stack drops
 * it).  Returns the pf_test() verdict.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on
	 * ip_len/ip_off in the network stack.  OpenBSD used to convert
	 * ip_len/ip_off to host byte order first, like FreeBSD does.
	 * That is no longer the case, so pf now expects network byte
	 * order and we must convert around the pf_test() call here.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET.  Same byte-order handling as
 * pf_check_in(), but first finalizes any delayed transport checksum so
 * pf sees a complete packet (cf. OpenBSD ip_output).
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on
	 * ip_len/ip_off in the network stack.  OpenBSD used to convert
	 * ip_len/ip_off to host byte order first, like FreeBSD does.
	 * That is no longer the case, so pf now expects network byte
	 * order and we must convert around the pf_test() call here.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for AF_INET6.  Runs the packet through pf_test6()
 * and frees the mbuf on rejection; no byte-order fixups are needed.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
	 */
	int chk;

	chk = pf_test6(PF_IN, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET6.  Like pf_check6_in(), but first
 * finalizes any delayed transport checksum so pf sees a complete
 * packet.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf check functions with the pfil(9) framework for
 * AF_INET (and AF_INET6 when compiled in).  Idempotent: returns 0
 * immediately if already hooked.  If the INET6 head is missing the
 * INET hooks are removed again before failing.  Must be called without
 * the pf lock held (pfil may sleep with PFIL_WAITOK).
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister the pf check functions from pfil(9).  Idempotent: returns
 * 0 immediately if not hooked.  Must be called without the pf lock
 * held.
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: initialize the zone pointers and pf mutex, create
 * /dev/pf, and run pfattach().  On pfattach() failure the device and
 * mutex are torn down again and ENOMEM is returned.
 */
static int
pf_load(void)
{
	init_zone_var();
	init_pf_mutex();
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pfattach() < 0) {
		destroy_dev(pf_dev);
		destroy_pf_mutex();
		return (ENOMEM);
	}
	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, detach the pfil hooks, then tear down
 * all pf state under PF_LOCK and destroy /dev/pf and the mutex.  If
 * dehook_pf() fails the unload is aborted without further cleanup.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	pf_status.running = 0;
	PF_UNLOCK();
	/* pfil may sleep; must not hold PF_LOCK across dehook_pf(). */
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_dev(pf_dev);
	destroy_pf_mutex();
	return error;
}

/*
 * module(9) event handler dispatching MOD_LOAD/MOD_UNLOAD to
 * pf_load()/pf_unload(); all other events are rejected with EINVAL.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/* module(9) registration for the pf kernel module. */
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */