pf_ioctl.c revision 293896
1/* $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ */ 2 3/* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
35 * 36 */ 37 38#ifdef __FreeBSD__ 39#include <sys/cdefs.h> 40__FBSDID("$FreeBSD: releng/9.3/sys/contrib/pf/net/pf_ioctl.c 293896 2016-01-14 09:11:26Z glebius $"); 41 42#include "opt_inet.h" 43#include "opt_inet6.h" 44#include "opt_bpf.h" 45#include "opt_pf.h" 46 47#define NPFSYNC 1 48 49#ifdef DEV_PFLOG 50#define NPFLOG DEV_PFLOG 51#else 52#define NPFLOG 0 53#endif 54 55#else /* !__FreeBSD__ */ 56#include "pfsync.h" 57#include "pflog.h" 58#endif /* __FreeBSD__ */ 59 60#include <sys/param.h> 61#include <sys/systm.h> 62#include <sys/mbuf.h> 63#include <sys/filio.h> 64#include <sys/fcntl.h> 65#include <sys/socket.h> 66#include <sys/socketvar.h> 67#include <sys/kernel.h> 68#include <sys/time.h> 69#ifdef __FreeBSD__ 70#include <sys/ucred.h> 71#include <sys/jail.h> 72#include <sys/module.h> 73#include <sys/conf.h> 74#include <sys/proc.h> 75#include <sys/sysctl.h> 76#else 77#include <sys/timeout.h> 78#include <sys/pool.h> 79#endif 80#include <sys/proc.h> 81#include <sys/malloc.h> 82#include <sys/kthread.h> 83#ifndef __FreeBSD__ 84#include <sys/rwlock.h> 85#include <uvm/uvm_extern.h> 86#endif 87 88#include <net/if.h> 89#include <net/if_types.h> 90#ifdef __FreeBSD__ 91#include <net/vnet.h> 92#endif 93#include <net/route.h> 94 95#include <netinet/in.h> 96#include <netinet/in_var.h> 97#include <netinet/in_systm.h> 98#include <netinet/ip.h> 99#include <netinet/ip_var.h> 100#include <netinet/ip_icmp.h> 101 102#ifdef __FreeBSD__ 103#include <sys/md5.h> 104#else 105#include <dev/rndvar.h> 106#include <crypto/md5.h> 107#endif 108#include <net/pfvar.h> 109 110#include <net/if_pfsync.h> 111 112#if NPFLOG > 0 113#include <net/if_pflog.h> 114#endif /* NPFLOG > 0 */ 115 116#ifdef INET6 117#include <netinet/ip6.h> 118#include <netinet/in_pcb.h> 119#endif /* INET6 */ 120 121#ifdef ALTQ 122#include <altq/altq.h> 123#endif 124 125#ifdef __FreeBSD__ 126#include <sys/limits.h> 127#include <sys/lock.h> 128#include <sys/mutex.h> 129#include <net/pfil.h> 130#endif /* __FreeBSD__ */ 131 
#ifdef __FreeBSD__
/* Zone setup/teardown and device attach, FreeBSD flavour. */
void			 init_zone_var(void);
void			 cleanup_pf_zone(void);
int			 pfattach(void);
#else
/* OpenBSD attach path and character-device open/close. */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
#endif
/* Look up the address pool of a rule selected by anchor/ticket/number. */
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
#ifdef __FreeBSD__
int			 pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
#else
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
#endif
#ifdef ALTQ
/* Transactional begin/rollback/commit for the inactive ALTQ list. */
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
/* Transactional begin/rollback/commit for inactive rulesets. */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
void			 pf_addr_copyout(struct pf_addr_wrap *);

/* Largest tag/queue id handed out by tagname2tag(). */
#define TAGID_MAX	 50000

#ifdef __FreeBSD__
/* Per-VNET instances of the default rule and consistency lock. */
VNET_DEFINE(struct pf_rule,	pf_default_rule);
VNET_DEFINE(struct sx,		pf_consistency_lock);

#ifdef ALTQ
static VNET_DEFINE(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

TAILQ_HEAD(pf_tags, pf_tagname);

/* Tag and queue-id name<->number maps, per VNET. */
#define	V_pf_tags		VNET(pf_tags)
VNET_DEFINE(struct pf_tags, pf_tags);
#define	V_pf_qids		VNET(pf_qids)
VNET_DEFINE(struct pf_tags, pf_qids);

#else /* !__FreeBSD__ */
struct pf_rule		 pf_default_rule;
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#ifdef ALTQ
static int		 pf_altq_running;
#endif

TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
	pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
#endif /* __FreeBSD__ */

/* Queue and tag names share one allocator, so the sizes must agree. */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf, gated on the configured pf debug level. */
#ifdef __FreeBSD__
#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
#else
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
#endif

#ifdef __FreeBSD__
struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(void);
/*
 * XXX - These are new and need to be checked when moving to a new version
 */

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

/* Character-device switch: pf is driven entirely through ioctl(2). */
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE(int, pf_end_threads);
struct mtx pf_task_mtx;

/*
 * Indirection pointers filled in by the pfsync/pflow/pflog modules
 * when they load; NULL while the module is absent.
 */
/* pfsync */
pfsync_state_import_t *pfsync_state_import_ptr = NULL;
pfsync_insert_state_t *pfsync_insert_state_ptr = NULL;
pfsync_update_state_t *pfsync_update_state_ptr = NULL;
pfsync_delete_state_t *pfsync_delete_state_ptr = NULL;
pfsync_clear_states_t *pfsync_clear_states_ptr = NULL;
pfsync_state_in_use_t *pfsync_state_in_use_ptr = NULL;
pfsync_defer_t *pfsync_defer_ptr = NULL;
pfsync_up_t *pfsync_up_ptr = NULL;
/* pflow */
export_pflow_t *export_pflow_ptr = NULL;
/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

VNET_DEFINE(int, debug_pfugidhack);
SYSCTL_VNET_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW,
    &VNET_NAME(debug_pfugidhack), 0,
    "Enable/disable pf user/group rules mpsafe hack");

/* Create the global pf task mutex used by PF_LOCK()/PF_UNLOCK(). */
static void
init_pf_mutex(void)
{

	mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
}

static void
destroy_pf_mutex(void)
{

	mtx_destroy(&pf_task_mtx);
}
/* Reset every UMA zone pointer so a failed attach can be detected. */
void
init_zone_var(void)
{
	V_pf_src_tree_pl = V_pf_rule_pl = NULL;
	V_pf_state_pl = V_pf_state_key_pl = V_pf_state_item_pl = NULL;
	V_pf_altq_pl = V_pf_pooladdr_pl = NULL;
	V_pf_frent_pl = V_pf_frag_pl = V_pf_cache_pl = V_pf_cent_pl = NULL;
	V_pf_state_scrub_pl = NULL;
	V_pfr_ktable_pl = V_pfr_kentry_pl = V_pfr_kcounters_pl = NULL;
}

/* Destroy all pf UMA zones (UMA_DESTROY tolerates NULL pointers). */
void
cleanup_pf_zone(void)
{
	UMA_DESTROY(V_pf_src_tree_pl);
	UMA_DESTROY(V_pf_rule_pl);
	UMA_DESTROY(V_pf_state_pl);
	UMA_DESTROY(V_pf_state_key_pl);
	UMA_DESTROY(V_pf_state_item_pl);
	UMA_DESTROY(V_pf_altq_pl);
	UMA_DESTROY(V_pf_pooladdr_pl);
	UMA_DESTROY(V_pf_frent_pl);
	UMA_DESTROY(V_pf_frag_pl);
	UMA_DESTROY(V_pf_cache_pl);
	UMA_DESTROY(V_pf_cent_pl);
	UMA_DESTROY(V_pfr_ktable_pl);
	UMA_DESTROY(V_pfr_kentry_pl);
	UMA_DESTROY(V_pfr_kcounters_pl);
	UMA_DESTROY(V_pf_state_scrub_pl);
	UMA_DESTROY(V_pfi_addr_pl);
}

/*
 * FreeBSD attach: create all UMA zones, initialize the table/interface/
 * OS-fingerprint subsystems and install the pool limits.  Returns 0 on
 * success or an errno; on failure all zones created so far are destroyed.
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;
	int error = 1;

	do {
		UMA_CREATE(V_pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
		UMA_CREATE(V_pf_rule_pl, struct pf_rule, "pfrulepl");
		UMA_CREATE(V_pf_state_pl, struct pf_state, "pfstatepl");
		/*
		 * NOTE(review): the key/item zones are created with the size
		 * of struct pf_state rather than pf_state_key/pf_state_item
		 * — TODO confirm this over-allocation is intentional.
		 */
		UMA_CREATE(V_pf_state_key_pl, struct pf_state, "pfstatekeypl");
		UMA_CREATE(V_pf_state_item_pl, struct pf_state, "pfstateitempl");
		UMA_CREATE(V_pf_altq_pl, struct pf_altq, "pfaltqpl");
		UMA_CREATE(V_pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		UMA_CREATE(V_pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		UMA_CREATE(V_pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		UMA_CREATE(V_pfr_kcounters_pl, struct pfr_kcounters, "pfrkcounters");
		UMA_CREATE(V_pf_frent_pl, struct pf_frent, "pffrent");
		UMA_CREATE(V_pf_frag_pl, struct pf_fragment, "pffrag");
		UMA_CREATE(V_pf_cache_pl, struct pf_fragment, "pffrcache");
		UMA_CREATE(V_pf_cent_pl, struct pf_frcache, "pffrcent");
		UMA_CREATE(V_pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		UMA_CREATE(V_pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	if ( (error = pf_osfp_initialize()) ) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* Wire each resource limit to its backing zone. */
	V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_pl;
	V_pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_pl;
	V_pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl;
	V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_pl;
	V_pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
	V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp
	    = V_pfr_kentry_pl;
	V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
	/* Only the state zone gets a hard UMA cap up front. */
	uma_zone_set_max(V_pf_pool_limits[PF_LIMIT_STATES].pp,
	    V_pf_pool_limits[PF_LIMIT_STATES].limit);

	RB_INIT(&V_tree_src_tracking);
	RB_INIT(&V_pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);

	/* Two ALTQ lists: [0] starts active, [1] starts inactive. */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];
	TAILQ_INIT(&V_state_list);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = PF_PASS;
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	/* State purging needs process context; run it as a kernel thread. */
	if (kproc_create(pf_purge_thread, curvnet, NULL, 0, 0, "pfpurge"))
		return (ENXIO);

	m_addr_chg_pf_p = pf_pkt_addr_changed;

	return (error);
}
#else /* !__FreeBSD__ */

/*
 * OpenBSD attach: initialize the pool(9) allocators and subsystems,
 * then defer purge-thread creation until process context exists.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
	    "pfstateitempl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Shrink the table-entry limit on machines with <= 100MB RAM. */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}

/* Deferred callback: spawn the state-purge kernel thread. */
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}

/* Only minor device 0 exists. */
int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}
#endif

struct
 pf_pool *
/*
 * Find the address pool of one rule, identified by anchor path, ruleset
 * ticket, action (mapped to a ruleset number) and rule number.  r_last
 * selects the last rule instead of matching rule_number; active selects
 * the active vs. inactive ruleset; check_ticket enforces ticket match.
 * Returns NULL if anything does not resolve.
 */
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* Walk forward to the requested rule number. */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/* Move every pool address from poola to the tail of poolb. */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/* Release every pool address in poola (dynaddr, table, kif refs). */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
#ifdef __FreeBSD__
		pool_put(&V_pf_pooladdr_pl, empty_pool_pa);
#else
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
#endif
	}
}

void
/*
 * Unlink a rule from rulequeue (if given) and free it once nothing —
 * states, source nodes or a queue — still references it.
 */
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced, or still on a queue: defer the actual free. */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Not done above: detach tables now. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
#ifdef __FreeBSD__
	pool_put(&V_pf_rule_pl, rule);
#else
	pool_put(&pf_rule_pl, rule);
#endif
}

/*
 * Map a tag name to a numeric id, allocating a new id (lowest free,
 * <= TAGID_MAX) on first use.  Returns 0 on exhaustion or allocation
 * failure; otherwise bumps the refcount and returns the id.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/* Copy the name for tagid into p; leaves p untouched if id unknown. */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* Drop one reference on tag; frees the entry when the count hits zero. */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

/* Public wrappers over the shared tag map. */
u_int16_t
pf_tagname2tag(char *tagname)
{
#ifdef __FreeBSD__
	return (tagname2tag(&V_pf_tags, tagname));
#else
	return (tagname2tag(&pf_tags, tagname));
#endif
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
#ifdef __FreeBSD__
	tag2tagname(&V_pf_tags, tagid, p);
#else
	tag2tagname(&pf_tags, tagid, p);
#endif
}

/* Take an extra reference on an already-allocated tag id. */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

#ifdef __FreeBSD__
	TAILQ_FOREACH(t, &V_pf_tags, entries)
#else
	TAILQ_FOREACH(t, &pf_tags, entries)
#endif
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
#ifdef __FreeBSD__
	tag_unref(&V_pf_tags, tag);
#else
	tag_unref(&pf_tags, tag);
#endif
}

/* Resolve a route-label name to an id (stubbed out on FreeBSD). */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	return (0);
#else
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
#endif
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
#else
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}

/* Fill in the textual route label for copyout to userland. */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
		strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
#else
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}

#ifdef ALTQ
/* Queue ids reuse the tag allocator (see PF_QNAME_SIZE check above). */
u_int32_t
pf_qname2qid(char *qname)
{
#ifdef __FreeBSD__
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
#else
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
#endif
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
#ifdef __FreeBSD__
	tag2tagname(&V_pf_qids, (u_int16_t)qid, p);
#else
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
#endif
}

void
pf_qid_unref(u_int32_t qid)
{
#ifdef __FreeBSD__
	tag_unref(&V_pf_qids, (u_int16_t)qid);
#else
	tag_unref(&pf_qids, (u_int16_t)qid);
#endif
}

/*
 * Start an ALTQ transaction: flush the inactive list and hand out a
 * fresh ticket.  Returns 0 or the error from altq_remove().
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
#ifdef __FreeBSD__
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
	if (error)
		return (error);
#ifdef __FreeBSD__
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
#else
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
#endif
	return (0);
}

/*
 * Abort an ALTQ transaction: if ticket matches the open inactive list,
 * purge it and close the transaction.  A stale ticket is a no-op.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

#ifdef __FreeBSD__
	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
#ifdef __FreeBSD__
	V_altqs_inactive_open = 0;
#else
	altqs_inactive_open = 0;
#endif
	return (error);
}

/*
 * Commit an ALTQ transaction: swap active/inactive lists, attach the
 * new disciplines, then purge the now-inactive (old) list.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;
#ifdef __FreeBSD__
	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
#else
	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
#endif
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
#ifdef __FreeBSD__
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;
#else
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;
#endif

	/* Attach new disciplines */
#ifdef __FreeBSD__
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
#endif
			/* attach the discipline */
			error = altq_pfattach(altq);
#ifdef __FreeBSD__
			if (error == 0 && V_pf_altq_running)
#else
			if (error == 0 && pf_altq_running)
#endif
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
#ifdef __FreeBSD__
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
#ifdef __FreeBSD__
			if (V_pf_altq_running)
#else
			if (pf_altq_running)
#endif
				error = pf_disable_altq(altq);
			/* Keep the first error but always finish teardown. */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
	splx(s);

#ifdef __FreeBSD__
	V_altqs_inactive_open = 0;
#else
	altqs_inactive_open = 0;
#endif
	return (error);
}

/*
 * Enable ALTQ on the queue's interface and install its token-bucket
 * regulator.  tbr_set() may sleep, so the pf lock is dropped around it
 * on FreeBSD.
 */
int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

/* Disable ALTQ on the queue's interface and clear its regulator. */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
#ifdef __FreeBSD__
		/* tbr_set() may sleep; drop the pf lock around it. */
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

#ifdef __FreeBSD__
/*
 * Interface arrival/departure handler: rebuild the ALTQ ruleset as a
 * fresh transaction, marking queues whose interface is gone (or being
 * removed) with PFALTQ_FLAG_IF_REMOVED, then commit — or roll back on
 * any error.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
#ifdef __FreeBSD__
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);
#else
	if (altqs_inactive_open)
		pf_rollback_altq(ticket_altqs_inactive);
#endif

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
#ifdef __FreeBSD__
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = pool_get(&V_pf_altq_pl, PR_NOWAIT);
#else
	TAILQ_FOREACH(a1, pf_altqs_active, entries) {
		a2 = pool_get(&pf_altq_pl, PR_NOWAIT);
#endif
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
#ifdef __FreeBSD__
				pool_put(&V_pf_altq_pl, a2);
#else
				pool_put(&pf_altq_pl, a2);
#endif
				break;
			}
			/* Re-link to the parent discipline on this ifname. */
			a2->altq_disc = NULL;
#ifdef __FreeBSD__
			TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
#else
			TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
#endif
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &=
~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			/* Interface absent (or departing): flag the entry. */
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			/*
			 * The pf lock is dropped around altq_add();
			 * presumably it may sleep -- TODO confirm.
			 */
			PF_UNLOCK();
			error = altq_add(a2);
			PF_LOCK();

			/*
			 * Re-check the transaction ticket after
			 * reacquiring the lock: the inactive set may have
			 * been replaced while unlocked.
			 */
#ifdef __FreeBSD__
			if (ticket != V_ticket_altqs_inactive)
#else
			if (ticket != ticket_altqs_inactive)
#endif
				error = EBUSY;

			if (error) {
#ifdef __FreeBSD__
				pool_put(&V_pf_altq_pl, a2);
#else
				pool_put(&pf_altq_pl, a2);
#endif
				break;
			}
		}

#ifdef __FreeBSD__
		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
#else
		TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
#endif
	}

	/* Install the rebuilt list, or throw it away on error. */
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif
#endif /* ALTQ */

/*
 * Open a rule transaction for ruleset rs_num of the given anchor:
 * flush any leftover inactive rules, bump the inactive ticket
 * (returned via *ticket) and mark the inactive set open.
 *
 * Returns 0 on success or EINVAL for a bad rs_num/anchor.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort the rule transaction identified by ticket: flush the inactive
 * rule queue and close the set.  A missing ruleset, closed set or
 * stale ticket is silently ignored (returns 0); only a bad rs_num
 * yields EINVAL.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Helpers for folding rule fields into an MD5 context ("ctx" must be
 * in scope at the expansion site).  The HTONL/HTONS variants hash
 * multi-byte fields in network byte order through the caller-supplied
 * temporary "stor", so the resulting digest is host-endian
 * independent.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/*
 * Fold the discriminating fields of a rule address (type-specific
 * address data, ports, negation flag and port operator) into the MD5
 * checksum being built for pfsync ruleset matching.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * Fold the identity-relevant fields of a rule into the MD5 checksum;
 * x and y are scratch storage for the byte-order-normalizing macros.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup?
*/
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit the rule transaction identified by ticket: swap the inactive
 * and active rule queues under splsoftnet, recompute skip steps for
 * the new active list, then purge the now-inactive old rules.  For
 * the main ruleset, the pfsync matching checksum is recomputed first.
 *
 * Returns 0 on success, EINVAL for a bad rs_num, EBUSY for a missing
 * ruleset, closed set or stale ticket, or the error from
 * pf_setup_pfsync_matching().
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	/* After the swap, "inactive" refers to the old storage. */
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * Build the per-ruleset rule pointer arrays (indexed by rule->nr) for
 * the inactive rulesets and compute the MD5 checksum over their
 * rules, storing the digest in pf_status.pf_chksum for pfsync peer
 * matching.
 *
 * Returns 0 on success or ENOMEM if a pointer array cannot be
 * allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well?
*/
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Rebuild the rule pointer array from scratch. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
#ifdef __FreeBSD__
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
#else
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
#endif
	return (0);
}

/*
 * Resolve the dynamic-interface and table components of a rule
 * address.  Returns 0 on success or EINVAL if either setup fails.
 */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr))
		return (EINVAL);

	return (0);
}

/*
 * Convert the kernel-internal parts of a rule address back into their
 * user-visible form before a rule is copied out to userland.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

/*
 * pf(4) character-device ioctl entry point.  The leading switch
 * statements gate commands by securelevel and by whether the device
 * was opened for writing; the main command dispatch follows.
 */
int
#ifdef __FreeBSD__
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
#ifndef __FreeBSD__
	int			 s;
#endif
	int			 error = 0;

	CURVNET_SET(TD_TO_VNET(td));

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 2))
#else
	if (securelevel > 1)
#endif
		/* At raised securelevel only read-mostly commands pass. */
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case
DIOCGETADDR: 1469 case DIOCGETSTATE: 1470 case DIOCSETSTATUSIF: 1471 case DIOCGETSTATUS: 1472 case DIOCCLRSTATUS: 1473 case DIOCNATLOOK: 1474 case DIOCSETDEBUG: 1475 case DIOCGETSTATES: 1476 case DIOCGETTIMEOUT: 1477 case DIOCCLRRULECTRS: 1478 case DIOCGETLIMIT: 1479 case DIOCGETALTQS: 1480 case DIOCGETALTQ: 1481 case DIOCGETQSTATS: 1482 case DIOCGETRULESETS: 1483 case DIOCGETRULESET: 1484 case DIOCRGETTABLES: 1485 case DIOCRGETTSTATS: 1486 case DIOCRCLRTSTATS: 1487 case DIOCRCLRADDRS: 1488 case DIOCRADDADDRS: 1489 case DIOCRDELADDRS: 1490 case DIOCRSETADDRS: 1491 case DIOCRGETADDRS: 1492 case DIOCRGETASTATS: 1493 case DIOCRCLRASTATS: 1494 case DIOCRTSTADDRS: 1495 case DIOCOSFPGET: 1496 case DIOCGETSRCNODES: 1497 case DIOCCLRSRCNODES: 1498 case DIOCIGETIFACES: 1499#ifdef __FreeBSD__ 1500 case DIOCGIFSPEED: 1501#endif 1502 case DIOCSETIFFLAG: 1503 case DIOCCLRIFFLAG: 1504 break; 1505 case DIOCRCLRTABLES: 1506 case DIOCRADDTABLES: 1507 case DIOCRDELTABLES: 1508 case DIOCRSETTFLAGS: 1509 if (((struct pfioc_table *)addr)->pfrio_flags & 1510 PFR_FLAG_DUMMY) 1511 break; /* dummy operation ok */ 1512 return (EPERM); 1513 default: 1514 return (EPERM); 1515 } 1516 1517 if (!(flags & FWRITE)) 1518 switch (cmd) { 1519 case DIOCGETRULES: 1520 case DIOCGETADDRS: 1521 case DIOCGETADDR: 1522 case DIOCGETSTATE: 1523 case DIOCGETSTATUS: 1524 case DIOCGETSTATES: 1525 case DIOCGETTIMEOUT: 1526 case DIOCGETLIMIT: 1527 case DIOCGETALTQS: 1528 case DIOCGETALTQ: 1529 case DIOCGETQSTATS: 1530 case DIOCGETRULESETS: 1531 case DIOCGETRULESET: 1532 case DIOCNATLOOK: 1533 case DIOCRGETTABLES: 1534 case DIOCRGETTSTATS: 1535 case DIOCRGETADDRS: 1536 case DIOCRGETASTATS: 1537 case DIOCRTSTADDRS: 1538 case DIOCOSFPGET: 1539 case DIOCGETSRCNODES: 1540 case DIOCIGETIFACES: 1541#ifdef __FreeBSD__ 1542 case DIOCGIFSPEED: 1543#endif 1544 break; 1545 case DIOCRCLRTABLES: 1546 case DIOCRADDTABLES: 1547 case DIOCRDELTABLES: 1548 case DIOCRCLRTSTATS: 1549 case DIOCRCLRADDRS: 1550 case DIOCRADDADDRS: 1551 
case DIOCRDELADDRS: 1552 case DIOCRSETADDRS: 1553 case DIOCRSETTFLAGS: 1554 if (((struct pfioc_table *)addr)->pfrio_flags & 1555 PFR_FLAG_DUMMY) { 1556 flags |= FWRITE; /* need write lock for dummy */ 1557 break; /* dummy operation ok */ 1558 } 1559 return (EACCES); 1560 case DIOCGETRULE: 1561 if (((struct pfioc_rule *)addr)->action == 1562 PF_GET_CLR_CNTR) 1563 return (EACCES); 1564 break; 1565 default: 1566 return (EACCES); 1567 } 1568 1569 if (flags & FWRITE) 1570#ifdef __FreeBSD__ 1571 sx_xlock(&V_pf_consistency_lock); 1572 else 1573 sx_slock(&V_pf_consistency_lock); 1574#else 1575 rw_enter_write(&pf_consistency_lock); 1576 else 1577 rw_enter_read(&pf_consistency_lock); 1578#endif 1579 1580#ifdef __FreeBSD__ 1581 PF_LOCK(); 1582#else 1583 s = splsoftnet(); 1584#endif 1585 switch (cmd) { 1586 1587 case DIOCSTART: 1588#ifdef __FreeBSD__ 1589 if (V_pf_status.running) 1590#else 1591 if (pf_status.running) 1592#endif 1593 error = EEXIST; 1594 else { 1595#ifdef __FreeBSD__ 1596 PF_UNLOCK(); 1597 error = hook_pf(); 1598 PF_LOCK(); 1599 if (error) { 1600 DPFPRINTF(PF_DEBUG_MISC, 1601 ("pf: pfil registeration fail\n")); 1602 break; 1603 } 1604 V_pf_status.running = 1; 1605 V_pf_status.since = time_second; 1606 1607 if (V_pf_status.stateid == 0) { 1608 V_pf_status.stateid = time_second; 1609 V_pf_status.stateid = V_pf_status.stateid << 32; 1610 } 1611#else 1612 pf_status.running = 1; 1613 pf_status.since = time_second; 1614 1615 if (pf_status.stateid == 0) { 1616 pf_status.stateid = time_second; 1617 pf_status.stateid = pf_status.stateid << 32; 1618 } 1619#endif 1620 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1621 } 1622 break; 1623 1624 case DIOCSTOP: 1625#ifdef __FreeBSD__ 1626 if (!V_pf_status.running) 1627 error = ENOENT; 1628 else { 1629 V_pf_status.running = 0; 1630 PF_UNLOCK(); 1631 error = dehook_pf(); 1632 PF_LOCK(); 1633 if (error) { 1634 V_pf_status.running = 1; 1635 DPFPRINTF(PF_DEBUG_MISC, 1636 ("pf: pfil unregisteration failed\n")); 1637 } 1638 
V_pf_status.since = time_second; 1639#else 1640 if (!pf_status.running) 1641 error = ENOENT; 1642 else { 1643 pf_status.running = 0; 1644 pf_status.since = time_second; 1645#endif 1646 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1647 } 1648 break; 1649 1650 case DIOCADDRULE: { 1651 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1652 struct pf_ruleset *ruleset; 1653 struct pf_rule *rule, *tail; 1654 struct pf_pooladdr *pa; 1655 int rs_num; 1656 1657 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1658 ruleset = pf_find_ruleset(pr->anchor); 1659 if (ruleset == NULL) { 1660 error = EINVAL; 1661 break; 1662 } 1663 rs_num = pf_get_ruleset_number(pr->rule.action); 1664 if (rs_num >= PF_RULESET_MAX) { 1665 error = EINVAL; 1666 break; 1667 } 1668 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1669 error = EINVAL; 1670 break; 1671 } 1672 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1673#ifdef __FreeBSD__ 1674 DPFPRINTF(PF_DEBUG_MISC, 1675 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1676 ruleset->rules[rs_num].inactive.ticket)); 1677#endif 1678 error = EBUSY; 1679 break; 1680 } 1681#ifdef __FreeBSD__ 1682 if (pr->pool_ticket != V_ticket_pabuf) { 1683 DPFPRINTF(PF_DEBUG_MISC, 1684 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1685 V_ticket_pabuf)); 1686#else 1687 if (pr->pool_ticket != ticket_pabuf) { 1688#endif 1689 error = EBUSY; 1690 break; 1691 } 1692#ifdef __FreeBSD__ 1693 rule = pool_get(&V_pf_rule_pl, PR_NOWAIT); 1694#else 1695 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); 1696#endif 1697 if (rule == NULL) { 1698 error = ENOMEM; 1699 break; 1700 } 1701 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1702#ifdef __FreeBSD__ 1703 rule->cuid = td->td_ucred->cr_ruid; 1704 rule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 1705#else 1706 rule->cuid = p->p_cred->p_ruid; 1707 rule->cpid = p->p_pid; 1708#endif 1709 rule->anchor = NULL; 1710 rule->kif = NULL; 1711 TAILQ_INIT(&rule->rpool.list); 1712 /* initialize refcounting */ 1713 rule->states_cur = 0; 1714 rule->src_nodes = 0; 1715 rule->entries.tqe_prev = NULL; 1716#ifndef INET 1717 if (rule->af == AF_INET) { 1718#ifdef __FreeBSD__ 1719 pool_put(&V_pf_rule_pl, rule); 1720#else 1721 pool_put(&pf_rule_pl, rule); 1722#endif 1723 error = EAFNOSUPPORT; 1724 break; 1725 } 1726#endif /* INET */ 1727#ifndef INET6 1728 if (rule->af == AF_INET6) { 1729#ifdef __FreeBSD__ 1730 pool_put(&V_pf_rule_pl, rule); 1731#else 1732 pool_put(&pf_rule_pl, rule); 1733#endif 1734 error = EAFNOSUPPORT; 1735 break; 1736 } 1737#endif /* INET6 */ 1738 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1739 pf_rulequeue); 1740 if (tail) 1741 rule->nr = tail->nr + 1; 1742 else 1743 rule->nr = 0; 1744 if (rule->ifname[0]) { 1745 rule->kif = pfi_kif_get(rule->ifname); 1746 if (rule->kif == NULL) { 1747#ifdef __FreeBSD__ 1748 pool_put(&V_pf_rule_pl, rule); 1749#else 1750 pool_put(&pf_rule_pl, rule); 1751#endif 1752 error = EINVAL; 1753 break; 1754 } 1755 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1756 } 1757 1758#ifdef __FreeBSD__ /* ROUTING */ 1759 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 1760#else 1761 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid)) 1762#endif 1763 error = EBUSY; 1764 1765#ifdef ALTQ 1766 /* set queue IDs */ 1767 if (rule->qname[0] != 0) { 1768 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1769 error = EBUSY; 1770 else if (rule->pqname[0] != 0) { 1771 if ((rule->pqid = 1772 pf_qname2qid(rule->pqname)) == 0) 1773 error = EBUSY; 1774 } else 1775 rule->pqid = rule->qid; 1776 } 1777#endif 1778 if (rule->tagname[0]) 1779 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1780 error = EBUSY; 1781 if (rule->match_tagname[0]) 1782 if ((rule->match_tag = 1783 pf_tagname2tag(rule->match_tagname)) == 
0) 1784 error = EBUSY; 1785 if (rule->rt && !rule->direction) 1786 error = EINVAL; 1787#if NPFLOG > 0 1788 if (!rule->log) 1789 rule->logif = 0; 1790 if (rule->logif >= PFLOGIFS_MAX) 1791 error = EINVAL; 1792#endif 1793 if (pf_rtlabel_add(&rule->src.addr) || 1794 pf_rtlabel_add(&rule->dst.addr)) 1795 error = EBUSY; 1796 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1797 error = EINVAL; 1798 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1799 error = EINVAL; 1800 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1801 error = EINVAL; 1802#ifdef __FreeBSD__ 1803 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 1804#else 1805 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1806#endif 1807 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1808 error = EINVAL; 1809 1810 if (rule->overload_tblname[0]) { 1811 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1812 rule->overload_tblname, 0)) == NULL) 1813 error = EINVAL; 1814 else 1815 rule->overload_tbl->pfrkt_flags |= 1816 PFR_TFLAG_ACTIVE; 1817 } 1818 1819#ifdef __FreeBSD__ 1820 pf_mv_pool(&V_pf_pabuf, &rule->rpool.list); 1821#else 1822 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1823#endif 1824 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1825 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1826 (rule->rt > PF_FASTROUTE)) && 1827 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1828 error = EINVAL; 1829 1830 if (error) { 1831 pf_rm_rule(NULL, rule); 1832 break; 1833 } 1834 1835#ifdef __FreeBSD__ 1836 if (!V_debug_pfugidhack && (rule->uid.op || rule->gid.op || 1837 rule->log & PF_LOG_SOCKET_LOOKUP)) { 1838 DPFPRINTF(PF_DEBUG_MISC, 1839 ("pf: debug.pfugidhack enabled\n")); 1840 V_debug_pfugidhack = 1; 1841 } 1842#endif 1843 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1844 rule->evaluations = rule->packets[0] = rule->packets[1] = 1845 rule->bytes[0] = rule->bytes[1] = 0; 1846 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1847 rule, entries); 1848 ruleset->rules[rs_num].inactive.rcount++; 1849 
break; 1850 } 1851 1852 case DIOCGETRULES: { 1853 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1854 struct pf_ruleset *ruleset; 1855 struct pf_rule *tail; 1856 int rs_num; 1857 1858 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1859 ruleset = pf_find_ruleset(pr->anchor); 1860 if (ruleset == NULL) { 1861 error = EINVAL; 1862 break; 1863 } 1864 rs_num = pf_get_ruleset_number(pr->rule.action); 1865 if (rs_num >= PF_RULESET_MAX) { 1866 error = EINVAL; 1867 break; 1868 } 1869 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1870 pf_rulequeue); 1871 if (tail) 1872 pr->nr = tail->nr + 1; 1873 else 1874 pr->nr = 0; 1875 pr->ticket = ruleset->rules[rs_num].active.ticket; 1876 break; 1877 } 1878 1879 case DIOCGETRULE: { 1880 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1881 struct pf_ruleset *ruleset; 1882 struct pf_rule *rule; 1883 int rs_num, i; 1884 1885 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1886 ruleset = pf_find_ruleset(pr->anchor); 1887 if (ruleset == NULL) { 1888 error = EINVAL; 1889 break; 1890 } 1891 rs_num = pf_get_ruleset_number(pr->rule.action); 1892 if (rs_num >= PF_RULESET_MAX) { 1893 error = EINVAL; 1894 break; 1895 } 1896 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1897 error = EBUSY; 1898 break; 1899 } 1900 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1901 while ((rule != NULL) && (rule->nr != pr->nr)) 1902 rule = TAILQ_NEXT(rule, entries); 1903 if (rule == NULL) { 1904 error = EBUSY; 1905 break; 1906 } 1907 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1908 if (pf_anchor_copyout(ruleset, rule, pr)) { 1909 error = EBUSY; 1910 break; 1911 } 1912 pf_addr_copyout(&pr->rule.src.addr); 1913 pf_addr_copyout(&pr->rule.dst.addr); 1914 for (i = 0; i < PF_SKIP_COUNT; ++i) 1915 if (rule->skip[i].ptr == NULL) 1916 pr->rule.skip[i].nr = -1; 1917 else 1918 pr->rule.skip[i].nr = 1919 rule->skip[i].ptr->nr; 1920 1921 if (pr->action == PF_GET_CLR_CNTR) { 1922 rule->evaluations = 0; 1923 rule->packets[0] = rule->packets[1] = 0; 1924 
rule->bytes[0] = rule->bytes[1] = 0; 1925 rule->states_tot = 0; 1926 } 1927 break; 1928 } 1929 1930 case DIOCCHANGERULE: { 1931 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1932 struct pf_ruleset *ruleset; 1933 struct pf_rule *oldrule = NULL, *newrule = NULL; 1934 u_int32_t nr = 0; 1935 int rs_num; 1936 1937 if (!(pcr->action == PF_CHANGE_REMOVE || 1938 pcr->action == PF_CHANGE_GET_TICKET) && 1939#ifdef __FreeBSD__ 1940 pcr->pool_ticket != V_ticket_pabuf) { 1941#else 1942 pcr->pool_ticket != ticket_pabuf) { 1943#endif 1944 error = EBUSY; 1945 break; 1946 } 1947 1948 if (pcr->action < PF_CHANGE_ADD_HEAD || 1949 pcr->action > PF_CHANGE_GET_TICKET) { 1950 error = EINVAL; 1951 break; 1952 } 1953 ruleset = pf_find_ruleset(pcr->anchor); 1954 if (ruleset == NULL) { 1955 error = EINVAL; 1956 break; 1957 } 1958 rs_num = pf_get_ruleset_number(pcr->rule.action); 1959 if (rs_num >= PF_RULESET_MAX) { 1960 error = EINVAL; 1961 break; 1962 } 1963 1964 if (pcr->action == PF_CHANGE_GET_TICKET) { 1965 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1966 break; 1967 } else { 1968 if (pcr->ticket != 1969 ruleset->rules[rs_num].active.ticket) { 1970 error = EINVAL; 1971 break; 1972 } 1973 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1974 error = EINVAL; 1975 break; 1976 } 1977 } 1978 1979 if (pcr->action != PF_CHANGE_REMOVE) { 1980#ifdef __FreeBSD__ 1981 newrule = pool_get(&V_pf_rule_pl, PR_NOWAIT); 1982#else 1983 newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); 1984#endif 1985 if (newrule == NULL) { 1986 error = ENOMEM; 1987 break; 1988 } 1989 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1990#ifdef __FreeBSD__ 1991 newrule->cuid = td->td_ucred->cr_ruid; 1992 newrule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 1993#else 1994 newrule->cuid = p->p_cred->p_ruid; 1995 newrule->cpid = p->p_pid; 1996#endif 1997 TAILQ_INIT(&newrule->rpool.list); 1998 /* initialize refcounting */ 1999 newrule->states_cur = 0; 2000 newrule->entries.tqe_prev = NULL; 2001#ifndef INET 2002 if (newrule->af == AF_INET) { 2003#ifdef __FreeBSD__ 2004 pool_put(&V_pf_rule_pl, newrule); 2005#else 2006 pool_put(&pf_rule_pl, newrule); 2007#endif 2008 error = EAFNOSUPPORT; 2009 break; 2010 } 2011#endif /* INET */ 2012#ifndef INET6 2013 if (newrule->af == AF_INET6) { 2014#ifdef __FreeBSD__ 2015 pool_put(&V_pf_rule_pl, newrule); 2016#else 2017 pool_put(&pf_rule_pl, newrule); 2018#endif 2019 error = EAFNOSUPPORT; 2020 break; 2021 } 2022#endif /* INET6 */ 2023 if (newrule->ifname[0]) { 2024 newrule->kif = pfi_kif_get(newrule->ifname); 2025 if (newrule->kif == NULL) { 2026#ifdef __FreeBSD__ 2027 pool_put(&V_pf_rule_pl, newrule); 2028#else 2029 pool_put(&pf_rule_pl, newrule); 2030#endif 2031 error = EINVAL; 2032 break; 2033 } 2034 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 2035 } else 2036 newrule->kif = NULL; 2037 2038 if (newrule->rtableid > 0 && 2039#ifdef __FreeBSD__ /* ROUTING */ 2040 newrule->rtableid >= rt_numfibs) 2041#else 2042 !rtable_exists(newrule->rtableid)) 2043#endif 2044 error = EBUSY; 2045 2046#ifdef ALTQ 2047 /* set queue IDs */ 2048 if (newrule->qname[0] != 0) { 2049 if ((newrule->qid = 2050 pf_qname2qid(newrule->qname)) == 0) 2051 error = EBUSY; 2052 else if (newrule->pqname[0] != 0) { 2053 if ((newrule->pqid = 2054 pf_qname2qid(newrule->pqname)) == 0) 2055 error = EBUSY; 2056 } else 2057 newrule->pqid = newrule->qid; 2058 } 2059#endif /* ALTQ */ 2060 if (newrule->tagname[0]) 2061 if ((newrule->tag = 2062 pf_tagname2tag(newrule->tagname)) == 0) 2063 error = EBUSY; 2064 if (newrule->match_tagname[0]) 2065 if ((newrule->match_tag = pf_tagname2tag( 2066 newrule->match_tagname)) == 0) 2067 error = EBUSY; 2068 if (newrule->rt && !newrule->direction) 2069 error = EINVAL; 
2070#if NPFLOG > 0 2071 if (!newrule->log) 2072 newrule->logif = 0; 2073 if (newrule->logif >= PFLOGIFS_MAX) 2074 error = EINVAL; 2075#endif 2076 if (pf_rtlabel_add(&newrule->src.addr) || 2077 pf_rtlabel_add(&newrule->dst.addr)) 2078 error = EBUSY; 2079 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 2080 error = EINVAL; 2081 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 2082 error = EINVAL; 2083 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 2084 error = EINVAL; 2085#ifdef __FreeBSD__ 2086 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2087#else 2088 TAILQ_FOREACH(pa, &pf_pabuf, entries) 2089#endif 2090 if (pf_tbladdr_setup(ruleset, &pa->addr)) 2091 error = EINVAL; 2092 2093 if (newrule->overload_tblname[0]) { 2094 if ((newrule->overload_tbl = pfr_attach_table( 2095 ruleset, newrule->overload_tblname, 0)) == 2096 NULL) 2097 error = EINVAL; 2098 else 2099 newrule->overload_tbl->pfrkt_flags |= 2100 PFR_TFLAG_ACTIVE; 2101 } 2102 2103#ifdef __FreeBSD__ 2104 pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list); 2105#else 2106 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 2107#endif 2108 if (((((newrule->action == PF_NAT) || 2109 (newrule->action == PF_RDR) || 2110 (newrule->action == PF_BINAT) || 2111 (newrule->rt > PF_FASTROUTE)) && 2112 !newrule->anchor)) && 2113 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 2114 error = EINVAL; 2115 2116 if (error) { 2117 pf_rm_rule(NULL, newrule); 2118 break; 2119 } 2120 2121#ifdef __FreeBSD__ 2122 if (!V_debug_pfugidhack && (newrule->uid.op || 2123 newrule->gid.op || 2124 newrule->log & PF_LOG_SOCKET_LOOKUP)) { 2125 DPFPRINTF(PF_DEBUG_MISC, 2126 ("pf: debug.pfugidhack enabled\n")); 2127 V_debug_pfugidhack = 1; 2128 } 2129#endif 2130 2131 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 2132 newrule->evaluations = 0; 2133 newrule->packets[0] = newrule->packets[1] = 0; 2134 newrule->bytes[0] = newrule->bytes[1] = 0; 2135 } 2136#ifdef __FreeBSD__ 2137 pf_empty_pool(&V_pf_pabuf); 2138#else 2139 
pf_empty_pool(&pf_pabuf); 2140#endif 2141 2142 if (pcr->action == PF_CHANGE_ADD_HEAD) 2143 oldrule = TAILQ_FIRST( 2144 ruleset->rules[rs_num].active.ptr); 2145 else if (pcr->action == PF_CHANGE_ADD_TAIL) 2146 oldrule = TAILQ_LAST( 2147 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 2148 else { 2149 oldrule = TAILQ_FIRST( 2150 ruleset->rules[rs_num].active.ptr); 2151 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 2152 oldrule = TAILQ_NEXT(oldrule, entries); 2153 if (oldrule == NULL) { 2154 if (newrule != NULL) 2155 pf_rm_rule(NULL, newrule); 2156 error = EINVAL; 2157 break; 2158 } 2159 } 2160 2161 if (pcr->action == PF_CHANGE_REMOVE) { 2162 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 2163 ruleset->rules[rs_num].active.rcount--; 2164 } else { 2165 if (oldrule == NULL) 2166 TAILQ_INSERT_TAIL( 2167 ruleset->rules[rs_num].active.ptr, 2168 newrule, entries); 2169 else if (pcr->action == PF_CHANGE_ADD_HEAD || 2170 pcr->action == PF_CHANGE_ADD_BEFORE) 2171 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 2172 else 2173 TAILQ_INSERT_AFTER( 2174 ruleset->rules[rs_num].active.ptr, 2175 oldrule, newrule, entries); 2176 ruleset->rules[rs_num].active.rcount++; 2177 } 2178 2179 nr = 0; 2180 TAILQ_FOREACH(oldrule, 2181 ruleset->rules[rs_num].active.ptr, entries) 2182 oldrule->nr = nr++; 2183 2184 ruleset->rules[rs_num].active.ticket++; 2185 2186 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 2187 pf_remove_if_empty_ruleset(ruleset); 2188 2189 break; 2190 } 2191 2192 case DIOCCLRSTATES: { 2193 struct pf_state *s, *nexts; 2194 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 2195 u_int killed = 0; 2196 2197#ifdef __FreeBSD__ 2198 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; s = nexts) { 2199 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); 2200#else 2201 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 2202 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 2203#endif 2204 2205 if (!psk->psk_ifname[0] || 
!strcmp(psk->psk_ifname, 2206 s->kif->pfik_name)) { 2207#if NPFSYNC > 0 2208 /* don't send out individual delete messages */ 2209 SET(s->state_flags, PFSTATE_NOSYNC); 2210#endif 2211 pf_unlink_state(s); 2212 killed++; 2213 } 2214 } 2215 psk->psk_killed = killed; 2216#if NPFSYNC > 0 2217#ifdef __FreeBSD__ 2218 if (pfsync_clear_states_ptr != NULL) 2219 pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname); 2220#else 2221 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 2222#endif 2223#endif 2224 break; 2225 } 2226 2227 case DIOCKILLSTATES: { 2228 struct pf_state *s, *nexts; 2229 struct pf_state_key *sk; 2230 struct pf_addr *srcaddr, *dstaddr; 2231 u_int16_t srcport, dstport; 2232 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 2233 u_int killed = 0; 2234 2235 if (psk->psk_pfcmp.id) { 2236 if (psk->psk_pfcmp.creatorid == 0) 2237#ifdef __FreeBSD__ 2238 psk->psk_pfcmp.creatorid = V_pf_status.hostid; 2239#else 2240 psk->psk_pfcmp.creatorid = pf_status.hostid; 2241#endif 2242 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 2243 pf_unlink_state(s); 2244 psk->psk_killed = 1; 2245 } 2246 break; 2247 } 2248 2249#ifdef __FreeBSD__ 2250 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; 2251 s = nexts) { 2252 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); 2253#else 2254 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 2255 s = nexts) { 2256 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 2257#endif 2258 sk = s->key[PF_SK_WIRE]; 2259 2260 if (s->direction == PF_OUT) { 2261 srcaddr = &sk->addr[1]; 2262 dstaddr = &sk->addr[0]; 2263 srcport = sk->port[0]; 2264 dstport = sk->port[0]; 2265 } else { 2266 srcaddr = &sk->addr[0]; 2267 dstaddr = &sk->addr[1]; 2268 srcport = sk->port[0]; 2269 dstport = sk->port[0]; 2270 } 2271 if ((!psk->psk_af || sk->af == psk->psk_af) 2272 && (!psk->psk_proto || psk->psk_proto == 2273 sk->proto) && 2274 PF_MATCHA(psk->psk_src.neg, 2275 &psk->psk_src.addr.v.a.addr, 2276 &psk->psk_src.addr.v.a.mask, 2277 srcaddr, sk->af) && 
2278 PF_MATCHA(psk->psk_dst.neg, 2279 &psk->psk_dst.addr.v.a.addr, 2280 &psk->psk_dst.addr.v.a.mask, 2281 dstaddr, sk->af) && 2282 (psk->psk_src.port_op == 0 || 2283 pf_match_port(psk->psk_src.port_op, 2284 psk->psk_src.port[0], psk->psk_src.port[1], 2285 srcport)) && 2286 (psk->psk_dst.port_op == 0 || 2287 pf_match_port(psk->psk_dst.port_op, 2288 psk->psk_dst.port[0], psk->psk_dst.port[1], 2289 dstport)) && 2290 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 2291 !strcmp(psk->psk_label, s->rule.ptr->label))) && 2292 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 2293 s->kif->pfik_name))) { 2294 pf_unlink_state(s); 2295 killed++; 2296 } 2297 } 2298 psk->psk_killed = killed; 2299 break; 2300 } 2301 2302 case DIOCADDSTATE: { 2303 struct pfioc_state *ps = (struct pfioc_state *)addr; 2304 struct pfsync_state *sp = &ps->state; 2305 2306 if (sp->timeout >= PFTM_MAX && 2307 sp->timeout != PFTM_UNTIL_PACKET) { 2308 error = EINVAL; 2309 break; 2310 } 2311#ifdef __FreeBSD__ 2312 if (pfsync_state_import_ptr != NULL) 2313 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 2314#else 2315 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 2316#endif 2317 break; 2318 } 2319 2320 case DIOCGETSTATE: { 2321 struct pfioc_state *ps = (struct pfioc_state *)addr; 2322 struct pf_state *s; 2323 struct pf_state_cmp id_key; 2324 2325 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id)); 2326 id_key.creatorid = ps->state.creatorid; 2327 2328 s = pf_find_state_byid(&id_key); 2329 if (s == NULL) { 2330 error = ENOENT; 2331 break; 2332 } 2333 2334 pfsync_state_export(&ps->state, s); 2335 break; 2336 } 2337 2338 case DIOCGETSTATES: { 2339 struct pfioc_states *ps = (struct pfioc_states *)addr; 2340 struct pf_state *state; 2341 struct pfsync_state *p, *pstore; 2342 u_int32_t nr = 0; 2343 2344 if (ps->ps_len == 0) { 2345#ifdef __FreeBSD__ 2346 nr = V_pf_status.states; 2347#else 2348 nr = pf_status.states; 2349#endif 2350 ps->ps_len = sizeof(struct pfsync_state) * nr; 2351 break; 2352 } 2353 
2354#ifdef __FreeBSD__ 2355 PF_UNLOCK(); 2356#endif 2357 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2358#ifdef __FreeBSD__ 2359 PF_LOCK(); 2360#endif 2361 2362 p = ps->ps_states; 2363 2364#ifdef __FreeBSD__ 2365 state = TAILQ_FIRST(&V_state_list); 2366#else 2367 state = TAILQ_FIRST(&state_list); 2368#endif 2369 while (state) { 2370 if (state->timeout != PFTM_UNLINKED) { 2371 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2372 break; 2373 pfsync_state_export(pstore, state); 2374#ifdef __FreeBSD__ 2375 PF_COPYOUT(pstore, p, sizeof(*p), error); 2376#else 2377 error = copyout(pstore, p, sizeof(*p)); 2378#endif 2379 if (error) { 2380 free(pstore, M_TEMP); 2381 goto fail; 2382 } 2383 p++; 2384 nr++; 2385 } 2386 state = TAILQ_NEXT(state, entry_list); 2387 } 2388 2389 ps->ps_len = sizeof(struct pfsync_state) * nr; 2390 2391 free(pstore, M_TEMP); 2392 break; 2393 } 2394 2395 case DIOCGETSTATUS: { 2396 struct pf_status *s = (struct pf_status *)addr; 2397#ifdef __FreeBSD__ 2398 bcopy(&V_pf_status, s, sizeof(struct pf_status)); 2399#else 2400 bcopy(&pf_status, s, sizeof(struct pf_status)); 2401#endif 2402 pfi_update_status(s->ifname, s); 2403 break; 2404 } 2405 2406 case DIOCSETSTATUSIF: { 2407 struct pfioc_if *pi = (struct pfioc_if *)addr; 2408 2409 if (pi->ifname[0] == 0) { 2410#ifdef __FreeBSD__ 2411 bzero(V_pf_status.ifname, IFNAMSIZ); 2412#else 2413 bzero(pf_status.ifname, IFNAMSIZ); 2414#endif 2415 break; 2416 } 2417#ifdef __FreeBSD__ 2418 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 2419#else 2420 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2421#endif 2422 break; 2423 } 2424 2425 case DIOCCLRSTATUS: { 2426#ifdef __FreeBSD__ 2427 bzero(V_pf_status.counters, sizeof(V_pf_status.counters)); 2428 bzero(V_pf_status.fcounters, sizeof(V_pf_status.fcounters)); 2429 bzero(V_pf_status.scounters, sizeof(V_pf_status.scounters)); 2430 V_pf_status.since = time_second; 2431 if (*V_pf_status.ifname) 2432 pfi_update_status(V_pf_status.ifname, NULL); 2433#else 2434 
bzero(pf_status.counters, sizeof(pf_status.counters)); 2435 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2436 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2437 pf_status.since = time_second; 2438 if (*pf_status.ifname) 2439 pfi_update_status(pf_status.ifname, NULL); 2440#endif 2441 break; 2442 } 2443 2444 case DIOCNATLOOK: { 2445 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2446 struct pf_state_key *sk; 2447 struct pf_state *state; 2448 struct pf_state_key_cmp key; 2449 int m = 0, direction = pnl->direction; 2450 int sidx, didx; 2451 2452 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 2453 sidx = (direction == PF_IN) ? 1 : 0; 2454 didx = (direction == PF_IN) ? 0 : 1; 2455 2456 if (!pnl->proto || 2457 PF_AZERO(&pnl->saddr, pnl->af) || 2458 PF_AZERO(&pnl->daddr, pnl->af) || 2459 ((pnl->proto == IPPROTO_TCP || 2460 pnl->proto == IPPROTO_UDP) && 2461 (!pnl->dport || !pnl->sport))) 2462 error = EINVAL; 2463 else { 2464 key.af = pnl->af; 2465 key.proto = pnl->proto; 2466 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 2467 key.port[sidx] = pnl->sport; 2468 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 2469 key.port[didx] = pnl->dport; 2470 2471 state = pf_find_state_all(&key, direction, &m); 2472 2473 if (m > 1) 2474 error = E2BIG; /* more than one state */ 2475 else if (state != NULL) { 2476 sk = state->key[sidx]; 2477 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 2478 pnl->rsport = sk->port[sidx]; 2479 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 2480 pnl->rdport = sk->port[didx]; 2481 } else 2482 error = ENOENT; 2483 } 2484 break; 2485 } 2486 2487 case DIOCSETTIMEOUT: { 2488 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2489 int old; 2490 2491 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2492 pt->seconds < 0) { 2493 error = EINVAL; 2494 goto fail; 2495 } 2496#ifdef __FreeBSD__ 2497 old = V_pf_default_rule.timeout[pt->timeout]; 2498#else 2499 old = pf_default_rule.timeout[pt->timeout]; 2500#endif 2501 if 
(pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2502 pt->seconds = 1; 2503#ifdef __FreeBSD__ 2504 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 2505#else 2506 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2507#endif 2508 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2509 wakeup(pf_purge_thread); 2510 pt->seconds = old; 2511 break; 2512 } 2513 2514 case DIOCGETTIMEOUT: { 2515 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2516 2517 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2518 error = EINVAL; 2519 goto fail; 2520 } 2521#ifdef __FreeBSD__ 2522 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 2523#else 2524 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2525#endif 2526 break; 2527 } 2528 2529 case DIOCGETLIMIT: { 2530 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2531 2532 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2533 error = EINVAL; 2534 goto fail; 2535 } 2536#ifdef __FreeBSD__ 2537 pl->limit = V_pf_pool_limits[pl->index].limit; 2538#else 2539 pl->limit = pf_pool_limits[pl->index].limit; 2540#endif 2541 break; 2542 } 2543 2544 case DIOCSETLIMIT: { 2545 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2546 int old_limit; 2547 2548 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2549#ifdef __FreeBSD__ 2550 V_pf_pool_limits[pl->index].pp == NULL) { 2551#else 2552 pf_pool_limits[pl->index].pp == NULL) { 2553#endif 2554 error = EINVAL; 2555 goto fail; 2556 } 2557#ifdef __FreeBSD__ 2558 uma_zone_set_max(V_pf_pool_limits[pl->index].pp, pl->limit); 2559 old_limit = V_pf_pool_limits[pl->index].limit; 2560 V_pf_pool_limits[pl->index].limit = pl->limit; 2561 pl->limit = old_limit; 2562#else 2563 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2564 pl->limit, NULL, 0) != 0) { 2565 error = EBUSY; 2566 goto fail; 2567 } 2568 old_limit = pf_pool_limits[pl->index].limit; 2569 pf_pool_limits[pl->index].limit = pl->limit; 2570 pl->limit = old_limit; 2571#endif 2572 break; 2573 } 2574 2575 case DIOCSETDEBUG: { 2576 
u_int32_t *level = (u_int32_t *)addr; 2577 2578#ifdef __FreeBSD__ 2579 V_pf_status.debug = *level; 2580#else 2581 pf_status.debug = *level; 2582#endif 2583 break; 2584 } 2585 2586 case DIOCCLRRULECTRS: { 2587 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2588 struct pf_ruleset *ruleset = &pf_main_ruleset; 2589 struct pf_rule *rule; 2590 2591 TAILQ_FOREACH(rule, 2592 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2593 rule->evaluations = 0; 2594 rule->packets[0] = rule->packets[1] = 0; 2595 rule->bytes[0] = rule->bytes[1] = 0; 2596 } 2597 break; 2598 } 2599 2600#ifdef __FreeBSD__ 2601 case DIOCGIFSPEED: { 2602 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2603 struct pf_ifspeed ps; 2604 struct ifnet *ifp; 2605 2606 if (psp->ifname[0] != 0) { 2607 /* Can we completely trust user-land? */ 2608 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2609 ifp = ifunit(ps.ifname); 2610 if (ifp != NULL) 2611 psp->baudrate = ifp->if_baudrate; 2612 else 2613 error = EINVAL; 2614 } else 2615 error = EINVAL; 2616 break; 2617 } 2618#endif /* __FreeBSD__ */ 2619 2620#ifdef ALTQ 2621 case DIOCSTARTALTQ: { 2622 struct pf_altq *altq; 2623 2624 /* enable all altq interfaces on active list */ 2625#ifdef __FreeBSD__ 2626 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 2627 if (altq->qname[0] == 0 && (altq->local_flags & 2628 PFALTQ_FLAG_IF_REMOVED) == 0) { 2629#else 2630 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2631 if (altq->qname[0] == 0) { 2632#endif 2633 error = pf_enable_altq(altq); 2634 if (error != 0) 2635 break; 2636 } 2637 } 2638 if (error == 0) 2639#ifdef __FreeBSD__ 2640 V_pf_altq_running = 1; 2641#else 2642 pf_altq_running = 1; 2643#endif 2644 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2645 break; 2646 } 2647 2648 case DIOCSTOPALTQ: { 2649 struct pf_altq *altq; 2650 2651 /* disable all altq interfaces on active list */ 2652#ifdef __FreeBSD__ 2653 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 2654 if (altq->qname[0] == 0 && 
(altq->local_flags & 2655 PFALTQ_FLAG_IF_REMOVED) == 0) { 2656#else 2657 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2658 if (altq->qname[0] == 0) { 2659#endif 2660 error = pf_disable_altq(altq); 2661 if (error != 0) 2662 break; 2663 } 2664 } 2665 if (error == 0) 2666#ifdef __FreeBSD__ 2667 V_pf_altq_running = 0; 2668#else 2669 pf_altq_running = 0; 2670#endif 2671 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2672 break; 2673 } 2674 2675 case DIOCADDALTQ: { 2676 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2677 struct pf_altq *altq, *a; 2678 2679#ifdef __FreeBSD__ 2680 if (pa->ticket != V_ticket_altqs_inactive) { 2681#else 2682 if (pa->ticket != ticket_altqs_inactive) { 2683#endif 2684 error = EBUSY; 2685 break; 2686 } 2687#ifdef __FreeBSD__ 2688 altq = pool_get(&V_pf_altq_pl, PR_NOWAIT); 2689#else 2690 altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL); 2691#endif 2692 if (altq == NULL) { 2693 error = ENOMEM; 2694 break; 2695 } 2696 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2697#ifdef __FreeBSD__ 2698 altq->local_flags = 0; 2699#endif 2700 2701 /* 2702 * if this is for a queue, find the discipline and 2703 * copy the necessary fields 2704 */ 2705 if (altq->qname[0] != 0) { 2706 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2707 error = EBUSY; 2708#ifdef __FreeBSD__ 2709 pool_put(&V_pf_altq_pl, altq); 2710#else 2711 pool_put(&pf_altq_pl, altq); 2712#endif 2713 break; 2714 } 2715 altq->altq_disc = NULL; 2716#ifdef __FreeBSD__ 2717 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) { 2718#else 2719 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2720#endif 2721 if (strncmp(a->ifname, altq->ifname, 2722 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2723 altq->altq_disc = a->altq_disc; 2724 break; 2725 } 2726 } 2727 } 2728 2729#ifdef __FreeBSD__ 2730 struct ifnet *ifp; 2731 2732 if ((ifp = ifunit(altq->ifname)) == NULL) { 2733 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 2734 } else { 2735 PF_UNLOCK(); 2736#endif 2737 error = altq_add(altq); 2738#ifdef 
__FreeBSD__ 2739 PF_LOCK(); 2740 } 2741#endif 2742 if (error) { 2743#ifdef __FreeBSD__ 2744 pool_put(&V_pf_altq_pl, altq); 2745#else 2746 pool_put(&pf_altq_pl, altq); 2747#endif 2748 break; 2749 } 2750 2751#ifdef __FreeBSD__ 2752 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 2753#else 2754 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2755#endif 2756 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2757 break; 2758 } 2759 2760 case DIOCGETALTQS: { 2761 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2762 struct pf_altq *altq; 2763 2764 pa->nr = 0; 2765#ifdef __FreeBSD__ 2766 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 2767 pa->nr++; 2768 pa->ticket = V_ticket_altqs_active; 2769#else 2770 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2771 pa->nr++; 2772 pa->ticket = ticket_altqs_active; 2773#endif 2774 break; 2775 } 2776 2777 case DIOCGETALTQ: { 2778 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2779 struct pf_altq *altq; 2780 u_int32_t nr; 2781 2782#ifdef __FreeBSD__ 2783 if (pa->ticket != V_ticket_altqs_active) { 2784#else 2785 if (pa->ticket != ticket_altqs_active) { 2786#endif 2787 error = EBUSY; 2788 break; 2789 } 2790 nr = 0; 2791#ifdef __FreeBSD__ 2792 altq = TAILQ_FIRST(V_pf_altqs_active); 2793#else 2794 altq = TAILQ_FIRST(pf_altqs_active); 2795#endif 2796 while ((altq != NULL) && (nr < pa->nr)) { 2797 altq = TAILQ_NEXT(altq, entries); 2798 nr++; 2799 } 2800 if (altq == NULL) { 2801 error = EBUSY; 2802 break; 2803 } 2804 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2805 break; 2806 } 2807 2808 case DIOCCHANGEALTQ: 2809 /* CHANGEALTQ not supported yet! 
*/ 2810 error = ENODEV; 2811 break; 2812 2813 case DIOCGETQSTATS: { 2814 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2815 struct pf_altq *altq; 2816 u_int32_t nr; 2817 int nbytes; 2818 2819#ifdef __FreeBSD__ 2820 if (pq->ticket != V_ticket_altqs_active) { 2821#else 2822 if (pq->ticket != ticket_altqs_active) { 2823#endif 2824 error = EBUSY; 2825 break; 2826 } 2827 nbytes = pq->nbytes; 2828 nr = 0; 2829#ifdef __FreeBSD__ 2830 altq = TAILQ_FIRST(V_pf_altqs_active); 2831#else 2832 altq = TAILQ_FIRST(pf_altqs_active); 2833#endif 2834 while ((altq != NULL) && (nr < pq->nr)) { 2835 altq = TAILQ_NEXT(altq, entries); 2836 nr++; 2837 } 2838 if (altq == NULL) { 2839 error = EBUSY; 2840 break; 2841 } 2842 2843#ifdef __FreeBSD__ 2844 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2845 error = ENXIO; 2846 break; 2847 } 2848 PF_UNLOCK(); 2849#endif 2850 error = altq_getqstats(altq, pq->buf, &nbytes); 2851#ifdef __FreeBSD__ 2852 PF_LOCK(); 2853#endif 2854 if (error == 0) { 2855 pq->scheduler = altq->scheduler; 2856 pq->nbytes = nbytes; 2857 } 2858 break; 2859 } 2860#endif /* ALTQ */ 2861 2862 case DIOCBEGINADDRS: { 2863 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2864 2865#ifdef __FreeBSD__ 2866 pf_empty_pool(&V_pf_pabuf); 2867 pp->ticket = ++V_ticket_pabuf; 2868#else 2869 pf_empty_pool(&pf_pabuf); 2870 pp->ticket = ++ticket_pabuf; 2871#endif 2872 break; 2873 } 2874 2875 case DIOCADDADDR: { 2876 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2877 2878#ifdef __FreeBSD__ 2879 if (pp->ticket != V_ticket_pabuf) { 2880#else 2881 if (pp->ticket != ticket_pabuf) { 2882#endif 2883 error = EBUSY; 2884 break; 2885 } 2886#ifndef INET 2887 if (pp->af == AF_INET) { 2888 error = EAFNOSUPPORT; 2889 break; 2890 } 2891#endif /* INET */ 2892#ifndef INET6 2893 if (pp->af == AF_INET6) { 2894 error = EAFNOSUPPORT; 2895 break; 2896 } 2897#endif /* INET6 */ 2898 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2899 pp->addr.addr.type != PF_ADDR_DYNIFTL && 
2900 pp->addr.addr.type != PF_ADDR_TABLE) { 2901 error = EINVAL; 2902 break; 2903 } 2904#ifdef __FreeBSD__ 2905 pa = pool_get(&V_pf_pooladdr_pl, PR_NOWAIT); 2906#else 2907 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK|PR_LIMITFAIL); 2908#endif 2909 if (pa == NULL) { 2910 error = ENOMEM; 2911 break; 2912 } 2913 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2914 if (pa->ifname[0]) { 2915 pa->kif = pfi_kif_get(pa->ifname); 2916 if (pa->kif == NULL) { 2917#ifdef __FreeBSD__ 2918 pool_put(&V_pf_pooladdr_pl, pa); 2919#else 2920 pool_put(&pf_pooladdr_pl, pa); 2921#endif 2922 error = EINVAL; 2923 break; 2924 } 2925 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2926 } 2927 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2928 pfi_dynaddr_remove(&pa->addr); 2929 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2930#ifdef __FreeBSD__ 2931 pool_put(&V_pf_pooladdr_pl, pa); 2932#else 2933 pool_put(&pf_pooladdr_pl, pa); 2934#endif 2935 error = EINVAL; 2936 break; 2937 } 2938#ifdef __FreeBSD__ 2939 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 2940#else 2941 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2942#endif 2943 break; 2944 } 2945 2946 case DIOCGETADDRS: { 2947 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2948 2949 pp->nr = 0; 2950 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2951 pp->r_num, 0, 1, 0); 2952 if (pool == NULL) { 2953 error = EBUSY; 2954 break; 2955 } 2956 TAILQ_FOREACH(pa, &pool->list, entries) 2957 pp->nr++; 2958 break; 2959 } 2960 2961 case DIOCGETADDR: { 2962 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2963 u_int32_t nr = 0; 2964 2965 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2966 pp->r_num, 0, 1, 1); 2967 if (pool == NULL) { 2968 error = EBUSY; 2969 break; 2970 } 2971 pa = TAILQ_FIRST(&pool->list); 2972 while ((pa != NULL) && (nr < pp->nr)) { 2973 pa = TAILQ_NEXT(pa, entries); 2974 nr++; 2975 } 2976 if (pa == NULL) { 2977 error = EBUSY; 2978 break; 2979 } 2980 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2981 
pf_addr_copyout(&pp->addr.addr); 2982 break; 2983 } 2984 2985 case DIOCCHANGEADDR: { 2986 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2987 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2988 struct pf_ruleset *ruleset; 2989 2990 if (pca->action < PF_CHANGE_ADD_HEAD || 2991 pca->action > PF_CHANGE_REMOVE) { 2992 error = EINVAL; 2993 break; 2994 } 2995 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2996 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2997 pca->addr.addr.type != PF_ADDR_TABLE) { 2998 error = EINVAL; 2999 break; 3000 } 3001 3002 ruleset = pf_find_ruleset(pca->anchor); 3003 if (ruleset == NULL) { 3004 error = EBUSY; 3005 break; 3006 } 3007 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 3008 pca->r_num, pca->r_last, 1, 1); 3009 if (pool == NULL) { 3010 error = EBUSY; 3011 break; 3012 } 3013 if (pca->action != PF_CHANGE_REMOVE) { 3014#ifdef __FreeBSD__ 3015 newpa = pool_get(&V_pf_pooladdr_pl, 3016 PR_NOWAIT); 3017#else 3018 newpa = pool_get(&pf_pooladdr_pl, 3019 PR_WAITOK|PR_LIMITFAIL); 3020#endif 3021 if (newpa == NULL) { 3022 error = ENOMEM; 3023 break; 3024 } 3025 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 3026#ifndef INET 3027 if (pca->af == AF_INET) { 3028#ifdef __FreeBSD__ 3029 pool_put(&V_pf_pooladdr_pl, newpa); 3030#else 3031 pool_put(&pf_pooladdr_pl, newpa); 3032#endif 3033 error = EAFNOSUPPORT; 3034 break; 3035 } 3036#endif /* INET */ 3037#ifndef INET6 3038 if (pca->af == AF_INET6) { 3039#ifdef __FreeBSD__ 3040 pool_put(&V_pf_pooladdr_pl, newpa); 3041#else 3042 pool_put(&pf_pooladdr_pl, newpa); 3043#endif 3044 error = EAFNOSUPPORT; 3045 break; 3046 } 3047#endif /* INET6 */ 3048 if (newpa->ifname[0]) { 3049 newpa->kif = pfi_kif_get(newpa->ifname); 3050 if (newpa->kif == NULL) { 3051#ifdef __FreeBSD__ 3052 pool_put(&V_pf_pooladdr_pl, newpa); 3053#else 3054 pool_put(&pf_pooladdr_pl, newpa); 3055#endif 3056 error = EINVAL; 3057 break; 3058 } 3059 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 3060 } else 3061 
newpa->kif = NULL; 3062 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 3063 pf_tbladdr_setup(ruleset, &newpa->addr)) { 3064 pfi_dynaddr_remove(&newpa->addr); 3065 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 3066#ifdef __FreeBSD__ 3067 pool_put(&V_pf_pooladdr_pl, newpa); 3068#else 3069 pool_put(&pf_pooladdr_pl, newpa); 3070#endif 3071 error = EINVAL; 3072 break; 3073 } 3074 } 3075 3076 if (pca->action == PF_CHANGE_ADD_HEAD) 3077 oldpa = TAILQ_FIRST(&pool->list); 3078 else if (pca->action == PF_CHANGE_ADD_TAIL) 3079 oldpa = TAILQ_LAST(&pool->list, pf_palist); 3080 else { 3081 int i = 0; 3082 3083 oldpa = TAILQ_FIRST(&pool->list); 3084 while ((oldpa != NULL) && (i < pca->nr)) { 3085 oldpa = TAILQ_NEXT(oldpa, entries); 3086 i++; 3087 } 3088 if (oldpa == NULL) { 3089 error = EINVAL; 3090 break; 3091 } 3092 } 3093 3094 if (pca->action == PF_CHANGE_REMOVE) { 3095 TAILQ_REMOVE(&pool->list, oldpa, entries); 3096 pfi_dynaddr_remove(&oldpa->addr); 3097 pf_tbladdr_remove(&oldpa->addr); 3098 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 3099#ifdef __FreeBSD__ 3100 pool_put(&V_pf_pooladdr_pl, oldpa); 3101#else 3102 pool_put(&pf_pooladdr_pl, oldpa); 3103#endif 3104 } else { 3105 if (oldpa == NULL) 3106 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 3107 else if (pca->action == PF_CHANGE_ADD_HEAD || 3108 pca->action == PF_CHANGE_ADD_BEFORE) 3109 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 3110 else 3111 TAILQ_INSERT_AFTER(&pool->list, oldpa, 3112 newpa, entries); 3113 } 3114 3115 pool->cur = TAILQ_FIRST(&pool->list); 3116 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 3117 pca->af); 3118 break; 3119 } 3120 3121 case DIOCGETRULESETS: { 3122 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 3123 struct pf_ruleset *ruleset; 3124 struct pf_anchor *anchor; 3125 3126 pr->path[sizeof(pr->path) - 1] = 0; 3127 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 3128 error = EINVAL; 3129 break; 3130 } 3131 pr->nr = 0; 3132 if (ruleset->anchor == NULL) { 3133 /* XXX kludge for 
pf_main_ruleset */ 3134#ifdef __FreeBSD__ 3135 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 3136#else 3137 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 3138#endif 3139 if (anchor->parent == NULL) 3140 pr->nr++; 3141 } else { 3142 RB_FOREACH(anchor, pf_anchor_node, 3143 &ruleset->anchor->children) 3144 pr->nr++; 3145 } 3146 break; 3147 } 3148 3149 case DIOCGETRULESET: { 3150 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 3151 struct pf_ruleset *ruleset; 3152 struct pf_anchor *anchor; 3153 u_int32_t nr = 0; 3154 3155 pr->path[sizeof(pr->path) - 1] = 0; 3156 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 3157 error = EINVAL; 3158 break; 3159 } 3160 pr->name[0] = 0; 3161 if (ruleset->anchor == NULL) { 3162 /* XXX kludge for pf_main_ruleset */ 3163#ifdef __FreeBSD__ 3164 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 3165#else 3166 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 3167#endif 3168 if (anchor->parent == NULL && nr++ == pr->nr) { 3169 strlcpy(pr->name, anchor->name, 3170 sizeof(pr->name)); 3171 break; 3172 } 3173 } else { 3174 RB_FOREACH(anchor, pf_anchor_node, 3175 &ruleset->anchor->children) 3176 if (nr++ == pr->nr) { 3177 strlcpy(pr->name, anchor->name, 3178 sizeof(pr->name)); 3179 break; 3180 } 3181 } 3182 if (!pr->name[0]) 3183 error = EBUSY; 3184 break; 3185 } 3186 3187 case DIOCRCLRTABLES: { 3188 struct pfioc_table *io = (struct pfioc_table *)addr; 3189 3190 if (io->pfrio_esize != 0) { 3191 error = ENODEV; 3192 break; 3193 } 3194 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 3195 io->pfrio_flags | PFR_FLAG_USERIOCTL); 3196 break; 3197 } 3198 3199 case DIOCRADDTABLES: { 3200 struct pfioc_table *io = (struct pfioc_table *)addr; 3201 3202 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3203 error = ENODEV; 3204 break; 3205 } 3206 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 3207 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3208 break; 3209 } 3210 3211 case DIOCRDELTABLES: { 3212 
struct pfioc_table *io = (struct pfioc_table *)addr; 3213 3214 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3215 error = ENODEV; 3216 break; 3217 } 3218 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 3219 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3220 break; 3221 } 3222 3223 case DIOCRGETTABLES: { 3224 struct pfioc_table *io = (struct pfioc_table *)addr; 3225 3226 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3227 error = ENODEV; 3228 break; 3229 } 3230 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 3231 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3232 break; 3233 } 3234 3235 case DIOCRGETTSTATS: { 3236 struct pfioc_table *io = (struct pfioc_table *)addr; 3237 3238 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 3239 error = ENODEV; 3240 break; 3241 } 3242 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 3243 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3244 break; 3245 } 3246 3247 case DIOCRCLRTSTATS: { 3248 struct pfioc_table *io = (struct pfioc_table *)addr; 3249 3250 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3251 error = ENODEV; 3252 break; 3253 } 3254 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 3255 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3256 break; 3257 } 3258 3259 case DIOCRSETTFLAGS: { 3260 struct pfioc_table *io = (struct pfioc_table *)addr; 3261 3262 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3263 error = ENODEV; 3264 break; 3265 } 3266 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 3267 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 3268 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3269 break; 3270 } 3271 3272 case DIOCRCLRADDRS: { 3273 struct pfioc_table *io = (struct pfioc_table *)addr; 3274 3275 if (io->pfrio_esize != 0) { 3276 error = ENODEV; 3277 break; 3278 } 3279 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 3280 io->pfrio_flags | PFR_FLAG_USERIOCTL); 3281 break; 3282 } 3283 
3284 case DIOCRADDADDRS: { 3285 struct pfioc_table *io = (struct pfioc_table *)addr; 3286 3287 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3288 error = ENODEV; 3289 break; 3290 } 3291 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 3292 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 3293 PFR_FLAG_USERIOCTL); 3294 break; 3295 } 3296 3297 case DIOCRDELADDRS: { 3298 struct pfioc_table *io = (struct pfioc_table *)addr; 3299 3300 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3301 error = ENODEV; 3302 break; 3303 } 3304 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 3305 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 3306 PFR_FLAG_USERIOCTL); 3307 break; 3308 } 3309 3310 case DIOCRSETADDRS: { 3311 struct pfioc_table *io = (struct pfioc_table *)addr; 3312 3313 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3314 error = ENODEV; 3315 break; 3316 } 3317 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 3318 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 3319 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 3320 PFR_FLAG_USERIOCTL, 0); 3321 break; 3322 } 3323 3324 case DIOCRGETADDRS: { 3325 struct pfioc_table *io = (struct pfioc_table *)addr; 3326 3327 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3328 error = ENODEV; 3329 break; 3330 } 3331 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 3332 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3333 break; 3334 } 3335 3336 case DIOCRGETASTATS: { 3337 struct pfioc_table *io = (struct pfioc_table *)addr; 3338 3339 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 3340 error = ENODEV; 3341 break; 3342 } 3343 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 3344 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3345 break; 3346 } 3347 3348 case DIOCRCLRASTATS: { 3349 struct pfioc_table *io = (struct pfioc_table *)addr; 3350 3351 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3352 error = ENODEV; 3353 break; 3354 } 3355 error = 
pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 3356 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 3357 PFR_FLAG_USERIOCTL); 3358 break; 3359 } 3360 3361 case DIOCRTSTADDRS: { 3362 struct pfioc_table *io = (struct pfioc_table *)addr; 3363 3364 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3365 error = ENODEV; 3366 break; 3367 } 3368 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 3369 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 3370 PFR_FLAG_USERIOCTL); 3371 break; 3372 } 3373 3374 case DIOCRINADEFINE: { 3375 struct pfioc_table *io = (struct pfioc_table *)addr; 3376 3377 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3378 error = ENODEV; 3379 break; 3380 } 3381 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 3382 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 3383 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3384 break; 3385 } 3386 3387 case DIOCOSFPADD: { 3388 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3389 error = pf_osfp_add(io); 3390 break; 3391 } 3392 3393 case DIOCOSFPGET: { 3394 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3395 error = pf_osfp_get(io); 3396 break; 3397 } 3398 3399 case DIOCXBEGIN: { 3400 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3401 struct pfioc_trans_e *ioe; 3402 struct pfr_table *table; 3403 int i; 3404 3405 if (io->esize != sizeof(*ioe)) { 3406 error = ENODEV; 3407 goto fail; 3408 } 3409#ifdef __FreeBSD__ 3410 PF_UNLOCK(); 3411#endif 3412 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3413 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3414#ifdef __FreeBSD__ 3415 PF_LOCK(); 3416#endif 3417 for (i = 0; i < io->size; i++) { 3418#ifdef __FreeBSD__ 3419 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3420 if (error) { 3421#else 3422 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3423#endif 3424 free(table, M_TEMP); 3425 free(ioe, M_TEMP); 3426 error = EFAULT; 3427 goto fail; 3428 } 3429 switch (ioe->rs_num) { 3430#ifdef ALTQ 3431 case 
PF_RULESET_ALTQ: 3432 if (ioe->anchor[0]) { 3433 free(table, M_TEMP); 3434 free(ioe, M_TEMP); 3435 error = EINVAL; 3436 goto fail; 3437 } 3438 if ((error = pf_begin_altq(&ioe->ticket))) { 3439 free(table, M_TEMP); 3440 free(ioe, M_TEMP); 3441 goto fail; 3442 } 3443 break; 3444#endif /* ALTQ */ 3445 case PF_RULESET_TABLE: 3446 bzero(table, sizeof(*table)); 3447 strlcpy(table->pfrt_anchor, ioe->anchor, 3448 sizeof(table->pfrt_anchor)); 3449 if ((error = pfr_ina_begin(table, 3450 &ioe->ticket, NULL, 0))) { 3451 free(table, M_TEMP); 3452 free(ioe, M_TEMP); 3453 goto fail; 3454 } 3455 break; 3456 default: 3457 if ((error = pf_begin_rules(&ioe->ticket, 3458 ioe->rs_num, ioe->anchor))) { 3459 free(table, M_TEMP); 3460 free(ioe, M_TEMP); 3461 goto fail; 3462 } 3463 break; 3464 } 3465#ifdef __FreeBSD__ 3466 PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]), 3467 error); 3468 if (error) { 3469#else 3470 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 3471#endif 3472 free(table, M_TEMP); 3473 free(ioe, M_TEMP); 3474 error = EFAULT; 3475 goto fail; 3476 } 3477 } 3478 free(table, M_TEMP); 3479 free(ioe, M_TEMP); 3480 break; 3481 } 3482 3483 case DIOCXROLLBACK: { 3484 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3485 struct pfioc_trans_e *ioe; 3486 struct pfr_table *table; 3487 int i; 3488 3489 if (io->esize != sizeof(*ioe)) { 3490 error = ENODEV; 3491 goto fail; 3492 } 3493#ifdef __FreeBSD__ 3494 PF_UNLOCK(); 3495#endif 3496 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3497 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3498#ifdef __FreeBSD__ 3499 PF_LOCK(); 3500#endif 3501 for (i = 0; i < io->size; i++) { 3502#ifdef __FreeBSD__ 3503 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3504 if (error) { 3505#else 3506 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3507#endif 3508 free(table, M_TEMP); 3509 free(ioe, M_TEMP); 3510 error = EFAULT; 3511 goto fail; 3512 } 3513 switch (ioe->rs_num) { 3514#ifdef ALTQ 3515 case PF_RULESET_ALTQ: 3516 if (ioe->anchor[0]) { 
3517 free(table, M_TEMP); 3518 free(ioe, M_TEMP); 3519 error = EINVAL; 3520 goto fail; 3521 } 3522 if ((error = pf_rollback_altq(ioe->ticket))) { 3523 free(table, M_TEMP); 3524 free(ioe, M_TEMP); 3525 goto fail; /* really bad */ 3526 } 3527 break; 3528#endif /* ALTQ */ 3529 case PF_RULESET_TABLE: 3530 bzero(table, sizeof(*table)); 3531 strlcpy(table->pfrt_anchor, ioe->anchor, 3532 sizeof(table->pfrt_anchor)); 3533 if ((error = pfr_ina_rollback(table, 3534 ioe->ticket, NULL, 0))) { 3535 free(table, M_TEMP); 3536 free(ioe, M_TEMP); 3537 goto fail; /* really bad */ 3538 } 3539 break; 3540 default: 3541 if ((error = pf_rollback_rules(ioe->ticket, 3542 ioe->rs_num, ioe->anchor))) { 3543 free(table, M_TEMP); 3544 free(ioe, M_TEMP); 3545 goto fail; /* really bad */ 3546 } 3547 break; 3548 } 3549 } 3550 free(table, M_TEMP); 3551 free(ioe, M_TEMP); 3552 break; 3553 } 3554 3555 case DIOCXCOMMIT: { 3556 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3557 struct pfioc_trans_e *ioe; 3558 struct pfr_table *table; 3559 struct pf_ruleset *rs; 3560 int i; 3561 3562 if (io->esize != sizeof(*ioe)) { 3563 error = ENODEV; 3564 goto fail; 3565 } 3566#ifdef __FreeBSD__ 3567 PF_UNLOCK(); 3568#endif 3569 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3570 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3571#ifdef __FreeBSD__ 3572 PF_LOCK(); 3573#endif 3574 /* first makes sure everything will succeed */ 3575 for (i = 0; i < io->size; i++) { 3576#ifdef __FreeBSD__ 3577 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3578 if (error) { 3579#else 3580 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3581#endif 3582 free(table, M_TEMP); 3583 free(ioe, M_TEMP); 3584 error = EFAULT; 3585 goto fail; 3586 } 3587 switch (ioe->rs_num) { 3588#ifdef ALTQ 3589 case PF_RULESET_ALTQ: 3590 if (ioe->anchor[0]) { 3591 free(table, M_TEMP); 3592 free(ioe, M_TEMP); 3593 error = EINVAL; 3594 goto fail; 3595 } 3596#ifdef __FreeBSD__ 3597 if (!V_altqs_inactive_open || ioe->ticket != 3598 
V_ticket_altqs_inactive) { 3599#else 3600 if (!altqs_inactive_open || ioe->ticket != 3601 ticket_altqs_inactive) { 3602#endif 3603 free(table, M_TEMP); 3604 free(ioe, M_TEMP); 3605 error = EBUSY; 3606 goto fail; 3607 } 3608 break; 3609#endif /* ALTQ */ 3610 case PF_RULESET_TABLE: 3611 rs = pf_find_ruleset(ioe->anchor); 3612 if (rs == NULL || !rs->topen || ioe->ticket != 3613 rs->tticket) { 3614 free(table, M_TEMP); 3615 free(ioe, M_TEMP); 3616 error = EBUSY; 3617 goto fail; 3618 } 3619 break; 3620 default: 3621 if (ioe->rs_num < 0 || ioe->rs_num >= 3622 PF_RULESET_MAX) { 3623 free(table, M_TEMP); 3624 free(ioe, M_TEMP); 3625 error = EINVAL; 3626 goto fail; 3627 } 3628 rs = pf_find_ruleset(ioe->anchor); 3629 if (rs == NULL || 3630 !rs->rules[ioe->rs_num].inactive.open || 3631 rs->rules[ioe->rs_num].inactive.ticket != 3632 ioe->ticket) { 3633 free(table, M_TEMP); 3634 free(ioe, M_TEMP); 3635 error = EBUSY; 3636 goto fail; 3637 } 3638 break; 3639 } 3640 } 3641 /* now do the commit - no errors should happen here */ 3642 for (i = 0; i < io->size; i++) { 3643#ifdef __FreeBSD__ 3644 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3645 if (error) { 3646#else 3647 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3648#endif 3649 free(table, M_TEMP); 3650 free(ioe, M_TEMP); 3651 error = EFAULT; 3652 goto fail; 3653 } 3654 switch (ioe->rs_num) { 3655#ifdef ALTQ 3656 case PF_RULESET_ALTQ: 3657 if ((error = pf_commit_altq(ioe->ticket))) { 3658 free(table, M_TEMP); 3659 free(ioe, M_TEMP); 3660 goto fail; /* really bad */ 3661 } 3662 break; 3663#endif /* ALTQ */ 3664 case PF_RULESET_TABLE: 3665 bzero(table, sizeof(*table)); 3666 strlcpy(table->pfrt_anchor, ioe->anchor, 3667 sizeof(table->pfrt_anchor)); 3668 if ((error = pfr_ina_commit(table, ioe->ticket, 3669 NULL, NULL, 0))) { 3670 free(table, M_TEMP); 3671 free(ioe, M_TEMP); 3672 goto fail; /* really bad */ 3673 } 3674 break; 3675 default: 3676 if ((error = pf_commit_rules(ioe->ticket, 3677 ioe->rs_num, ioe->anchor))) { 3678 
free(table, M_TEMP); 3679 free(ioe, M_TEMP); 3680 goto fail; /* really bad */ 3681 } 3682 break; 3683 } 3684 } 3685 free(table, M_TEMP); 3686 free(ioe, M_TEMP); 3687 break; 3688 } 3689 3690 case DIOCGETSRCNODES: { 3691 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3692 struct pf_src_node *n, *p, *pstore; 3693 u_int32_t nr = 0; 3694 int space = psn->psn_len; 3695 3696 if (space == 0) { 3697#ifdef __FreeBSD__ 3698 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) 3699#else 3700 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3701#endif 3702 nr++; 3703 psn->psn_len = sizeof(struct pf_src_node) * nr; 3704 break; 3705 } 3706 3707#ifdef __FreeBSD__ 3708 PF_UNLOCK(); 3709#endif 3710 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3711#ifdef __FreeBSD__ 3712 PF_LOCK(); 3713#endif 3714 p = psn->psn_src_nodes; 3715#ifdef __FreeBSD__ 3716 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { 3717#else 3718 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3719#endif 3720 int secs = time_second, diff; 3721 3722 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3723 break; 3724 3725 bcopy(n, pstore, sizeof(*pstore)); 3726 if (n->rule.ptr != NULL) 3727 pstore->rule.nr = n->rule.ptr->nr; 3728 pstore->creation = secs - pstore->creation; 3729 if (pstore->expire > secs) 3730 pstore->expire -= secs; 3731 else 3732 pstore->expire = 0; 3733 3734 /* adjust the connection rate estimate */ 3735 diff = secs - n->conn_rate.last; 3736 if (diff >= n->conn_rate.seconds) 3737 pstore->conn_rate.count = 0; 3738 else 3739 pstore->conn_rate.count -= 3740 n->conn_rate.count * diff / 3741 n->conn_rate.seconds; 3742 3743#ifdef __FreeBSD__ 3744 PF_COPYOUT(pstore, p, sizeof(*p), error); 3745#else 3746 error = copyout(pstore, p, sizeof(*p)); 3747#endif 3748 if (error) { 3749 free(pstore, M_TEMP); 3750 goto fail; 3751 } 3752 p++; 3753 nr++; 3754 } 3755 psn->psn_len = sizeof(struct pf_src_node) * nr; 3756 3757 free(pstore, M_TEMP); 3758 break; 3759 } 3760 3761 case DIOCCLRSRCNODES: { 3762 
struct pf_src_node *n; 3763 struct pf_state *state; 3764 3765#ifdef __FreeBSD__ 3766 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { 3767#else 3768 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3769#endif 3770 state->src_node = NULL; 3771 state->nat_src_node = NULL; 3772 } 3773#ifdef __FreeBSD__ 3774 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { 3775#else 3776 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3777#endif 3778 n->expire = 1; 3779 n->states = 0; 3780 } 3781 pf_purge_expired_src_nodes(1); 3782#ifdef __FreeBSD__ 3783 V_pf_status.src_nodes = 0; 3784#else 3785 pf_status.src_nodes = 0; 3786#endif 3787 break; 3788 } 3789 3790 case DIOCKILLSRCNODES: { 3791 struct pf_src_node *sn; 3792 struct pf_state *s; 3793 struct pfioc_src_node_kill *psnk = 3794 (struct pfioc_src_node_kill *)addr; 3795 u_int killed = 0; 3796 3797#ifdef __FreeBSD__ 3798 RB_FOREACH(sn, pf_src_tree, &V_tree_src_tracking) { 3799#else 3800 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 3801#endif 3802 if (PF_MATCHA(psnk->psnk_src.neg, 3803 &psnk->psnk_src.addr.v.a.addr, 3804 &psnk->psnk_src.addr.v.a.mask, 3805 &sn->addr, sn->af) && 3806 PF_MATCHA(psnk->psnk_dst.neg, 3807 &psnk->psnk_dst.addr.v.a.addr, 3808 &psnk->psnk_dst.addr.v.a.mask, 3809 &sn->raddr, sn->af)) { 3810 /* Handle state to src_node linkage */ 3811 if (sn->states != 0) { 3812 RB_FOREACH(s, pf_state_tree_id, 3813#ifdef __FreeBSD__ 3814 &V_tree_id) { 3815#else 3816 &tree_id) { 3817#endif 3818 if (s->src_node == sn) 3819 s->src_node = NULL; 3820 if (s->nat_src_node == sn) 3821 s->nat_src_node = NULL; 3822 } 3823 sn->states = 0; 3824 } 3825 sn->expire = 1; 3826 killed++; 3827 } 3828 } 3829 3830 if (killed > 0) 3831 pf_purge_expired_src_nodes(1); 3832 3833 psnk->psnk_killed = killed; 3834 break; 3835 } 3836 3837 case DIOCSETHOSTID: { 3838 u_int32_t *hostid = (u_int32_t *)addr; 3839 3840#ifdef __FreeBSD__ 3841 if (*hostid == 0) 3842 V_pf_status.hostid = arc4random(); 3843 else 3844 V_pf_status.hostid = *hostid; 3845#else 
		/* OpenBSD branch of DIOCSETHOSTID: 0 means "pick a random id". */
		if (*hostid == 0)
			pf_status.hostid = arc4random();
		else
			pf_status.hostid = *hostid;
#endif
		break;
	}

	case DIOCOSFPFLUSH:
		/* Drop all passive OS fingerprint entries. */
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Reject callers compiled against a different pfi_kif ABI. */
		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	/* Common exit: release pf lock and the consistency lock taken above. */
#ifdef __FreeBSD__
	PF_UNLOCK();

	if (flags & FWRITE)
		sx_xunlock(&V_pf_consistency_lock);
	else
		sx_sunlock(&V_pf_consistency_lock);
#else
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
#endif

	CURVNET_RESTORE();

	return (error);
}

#ifdef __FreeBSD__
/*
 * Flatten the kernel state 'st' into the pfsync wire representation 'sp',
 * converting multi-byte fields to network byte order.  'creation' and
 * 'expire' are exported as relative seconds (remaining time; 0 if already
 * expired), not absolute timestamps.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_second - st->creation);
	/*
	 * pf_state_expires() returns an absolute time; convert it to the
	 * number of seconds left before byte-swapping.
	 */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* htonl(-1) marks "no rule attached" for rule/anchor/nat_rule. */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}

/*
 * XXX - Check for version mismatch!!!
3973 */ 3974static void 3975pf_clear_states(void) 3976{ 3977 struct pf_state *state; 3978 3979#ifdef __FreeBSD__ 3980 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { 3981#else 3982 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3983#endif 3984 state->timeout = PFTM_PURGE; 3985#if NPFSYNC 3986 /* don't send out individual delete messages */ 3987 state->sync_state = PFSTATE_NOSYNC; 3988#endif 3989 pf_unlink_state(state); 3990 } 3991 3992#if 0 /* NPFSYNC */ 3993/* 3994 * XXX This is called on module unload, we do not want to sync that over? */ 3995 */ 3996 pfsync_clear_states(V_pf_status.hostid, psk->psk_ifname); 3997#endif 3998} 3999 4000static int 4001pf_clear_tables(void) 4002{ 4003 struct pfioc_table io; 4004 int error; 4005 4006 bzero(&io, sizeof(io)); 4007 4008 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 4009 io.pfrio_flags); 4010 4011 return (error); 4012} 4013 4014static void 4015pf_clear_srcnodes(void) 4016{ 4017 struct pf_src_node *n; 4018 struct pf_state *state; 4019 4020#ifdef __FreeBSD__ 4021 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { 4022#else 4023 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 4024#endif 4025 state->src_node = NULL; 4026 state->nat_src_node = NULL; 4027 } 4028#ifdef __FreeBSD__ 4029 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { 4030#else 4031 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 4032#endif 4033 n->expire = 1; 4034 n->states = 0; 4035 } 4036} 4037/* 4038 * XXX - Check for version missmatch!!! 4039 */ 4040 4041/* 4042 * Duplicate pfctl -Fa operation to get rid of as much as we can. 
 */
/*
 * Tear down the whole pf configuration on unload: stop the filter, commit
 * empty rulesets for every ruleset type, flush tables, ALTQ, states and
 * source nodes.  Returns the first error hit while opening the empty
 * transactions (commits are expected to succeed at this point).
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	V_pf_status.running = 0;
	do {
		/* Open an empty inactive ruleset of each type... */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/*
		 * ...and commit them, replacing the active rulesets with
		 * empty ones.  XXX: these should always succeed here.
		 */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

	#ifdef ALTQ
		/* Same empty-transaction trick for the ALTQ config. */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
	#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

#ifdef INET
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD
	 * has changed its byte ordering convention on ip_len/ip_off
	 * in its network stack.  OpenBSD's network stack used to convert
	 * ip_len/ip_off to host byte order first, as FreeBSD does.
	 * That is no longer true, so we must convert back to network
	 * byte order around the pf_test() call.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	/* Non-zero chk means the packet was blocked: free the mbuf here. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil output hook for IPv4: mirror image of pf_check_in() with
 * direction PF_OUT.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul 9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in its network stack.  OpenBSD's network stack used to convert
	 * ip_len/ip_off to host byte order first, as FreeBSD does.
	 * That is no longer true, so we must convert back to network
	 * byte order around the pf_test() call.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	/* Non-zero chk means the packet was blocked: free the mbuf here. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}
#endif

#ifdef INET6
/* pfil input hook for IPv6. */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we changed this to lo0 as is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
	    NULL, inp);
	CURVNET_RESTORE();
	/* Non-zero chk means the packet was blocked: free the mbuf here. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/* pfil output hook for IPv6. */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	/* Non-zero chk means the packet was blocked: free the mbuf here. */
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf_check* filter functions on the inet/inet6 pfil heads.
 * Idempotent: returns 0 immediately if already hooked.  If the inet6 head
 * is missing, the inet hooks installed above are rolled back so no partial
 * registration is left behind.
 */
static int
hook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	/* pfil_add_hook may sleep (PFIL_WAITOK); pf lock must not be held. */
	PF_UNLOCK_ASSERT();

	if (V_pf_pfil_hooked)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
#ifdef INET
		/* Roll back the inet hooks registered above. */
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	V_pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister the pf_check* filter functions from the pfil heads.
 * Idempotent: returns 0 immediately if not hooked.
 */
static int
dehook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	/* pfil_remove_hook may sleep; pf lock must not be held. */
	PF_UNLOCK_ASSERT();

	if (V_pf_pfil_hooked == 0)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return
(ESRCH); /* XXX */ 4300 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, 4301 pfh_inet6); 4302 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, 4303 pfh_inet6); 4304#endif 4305 4306 V_pf_pfil_hooked = 0; 4307 return (0); 4308} 4309 4310static int 4311pf_load(void) 4312{ 4313 VNET_ITERATOR_DECL(vnet_iter); 4314 4315 VNET_LIST_RLOCK(); 4316 VNET_FOREACH(vnet_iter) { 4317 CURVNET_SET(vnet_iter); 4318 V_pf_pfil_hooked = 0; 4319 V_pf_end_threads = 0; 4320 V_debug_pfugidhack = 0; 4321 TAILQ_INIT(&V_pf_tags); 4322 TAILQ_INIT(&V_pf_qids); 4323 CURVNET_RESTORE(); 4324 } 4325 VNET_LIST_RUNLOCK(); 4326 4327 init_pf_mutex(); 4328 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); 4329 init_zone_var(); 4330 sx_init(&V_pf_consistency_lock, "pf_statetbl_lock"); 4331 if (pfattach() < 0) 4332 return (ENOMEM); 4333 4334 return (0); 4335} 4336 4337static int 4338pf_unload(void) 4339{ 4340 int error = 0; 4341 4342 PF_LOCK(); 4343 V_pf_status.running = 0; 4344 PF_UNLOCK(); 4345 m_addr_chg_pf_p = NULL; 4346 error = dehook_pf(); 4347 if (error) { 4348 /* 4349 * Should not happen! 4350 * XXX Due to error code ESRCH, kldunload will show 4351 * a message like 'No such process'. 4352 */ 4353 printf("%s : pfil unregisteration fail\n", __FUNCTION__); 4354 return error; 4355 } 4356 PF_LOCK(); 4357 shutdown_pf(); 4358 V_pf_end_threads = 1; 4359 while (V_pf_end_threads < 2) { 4360 wakeup_one(pf_purge_thread); 4361 msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz); 4362 } 4363 pfi_cleanup(); 4364 pf_osfp_flush(); 4365 pf_osfp_cleanup(); 4366 cleanup_pf_zone(); 4367 PF_UNLOCK(); 4368 destroy_dev(pf_dev); 4369 destroy_pf_mutex(); 4370 sx_destroy(&V_pf_consistency_lock); 4371 return error; 4372} 4373 4374static int 4375pf_modevent(module_t mod, int type, void *data) 4376{ 4377 int error = 0; 4378 4379 switch(type) { 4380 case MOD_LOAD: 4381 error = pf_load(); 4382 break; 4383 case MOD_QUIESCE: 4384 /* 4385 * Module should not be unloaded due to race conditions. 
		 */
		error = EPERM;
		break;
	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/* module(9) registration glue for the pf kernel module. */
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */