/* pf_ioctl.c revision 223637 */
1/* $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ */ 2 3/* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
35 * 36 */ 37 38#ifdef __FreeBSD__ 39#include <sys/cdefs.h> 40__FBSDID("$FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 223637 2011-06-28 11:57:25Z bz $"); 41 42#include "opt_inet.h" 43#include "opt_inet6.h" 44#include "opt_bpf.h" 45#include "opt_pf.h" 46 47#ifdef DEV_BPF 48#define NBPFILTER DEV_BPF 49#else 50#define NBPFILTER 0 51#endif 52 53#ifdef DEV_PFLOG 54#define NPFLOG DEV_PFLOG 55#else 56#define NPFLOG 0 57#endif 58 59#ifdef DEV_PFSYNC 60#define NPFSYNC DEV_PFSYNC 61#else 62#define NPFSYNC 0 63#endif 64 65#else 66#include "pfsync.h" 67#include "pflog.h" 68#endif 69 70#include <sys/param.h> 71#include <sys/systm.h> 72#include <sys/mbuf.h> 73#include <sys/filio.h> 74#include <sys/fcntl.h> 75#include <sys/socket.h> 76#include <sys/socketvar.h> 77#include <sys/kernel.h> 78#include <sys/time.h> 79#ifdef __FreeBSD__ 80#include <sys/ucred.h> 81#include <sys/jail.h> 82#include <sys/module.h> 83#include <sys/conf.h> 84#include <sys/proc.h> 85#include <sys/sysctl.h> 86#else 87#include <sys/timeout.h> 88#include <sys/pool.h> 89#endif 90#include <sys/proc.h> 91#include <sys/malloc.h> 92#include <sys/kthread.h> 93#ifndef __FreeBSD__ 94#include <sys/rwlock.h> 95#include <uvm/uvm_extern.h> 96#endif 97 98#include <net/if.h> 99#include <net/if_types.h> 100#ifdef __FreeBSD__ 101#include <net/vnet.h> 102#endif 103#include <net/route.h> 104 105#include <netinet/in.h> 106#include <netinet/in_var.h> 107#include <netinet/in_systm.h> 108#include <netinet/ip.h> 109#include <netinet/ip_var.h> 110#include <netinet/ip_icmp.h> 111 112#ifdef __FreeBSD__ 113#include <sys/md5.h> 114#else 115#include <dev/rndvar.h> 116#include <crypto/md5.h> 117#endif 118#include <net/pfvar.h> 119 120#include <net/if_pfsync.h> 121 122#if NPFLOG > 0 123#include <net/if_pflog.h> 124#endif /* NPFLOG > 0 */ 125 126#ifdef INET6 127#include <netinet/ip6.h> 128#include <netinet/in_pcb.h> 129#endif /* INET6 */ 130 131#ifdef ALTQ 132#include <altq/altq.h> 133#endif 134 135#ifdef __FreeBSD__ 136#include <sys/limits.h> 
137#include <sys/lock.h> 138#include <sys/mutex.h> 139#include <net/pfil.h> 140#endif /* __FreeBSD__ */ 141 142#ifdef __FreeBSD__ 143void init_zone_var(void); 144void cleanup_pf_zone(void); 145int pfattach(void); 146#else 147void pfattach(int); 148void pf_thread_create(void *); 149int pfopen(dev_t, int, int, struct proc *); 150int pfclose(dev_t, int, int, struct proc *); 151#endif 152struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 153 u_int8_t, u_int8_t, u_int8_t); 154 155void pf_mv_pool(struct pf_palist *, struct pf_palist *); 156void pf_empty_pool(struct pf_palist *); 157#ifdef __FreeBSD__ 158int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); 159#else 160int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); 161#endif 162#ifdef ALTQ 163int pf_begin_altq(u_int32_t *); 164int pf_rollback_altq(u_int32_t); 165int pf_commit_altq(u_int32_t); 166int pf_enable_altq(struct pf_altq *); 167int pf_disable_altq(struct pf_altq *); 168#endif /* ALTQ */ 169int pf_begin_rules(u_int32_t *, int, const char *); 170int pf_rollback_rules(u_int32_t, int, char *); 171int pf_setup_pfsync_matching(struct pf_ruleset *); 172void pf_hash_rule(MD5_CTX *, struct pf_rule *); 173void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 174int pf_commit_rules(u_int32_t, int, char *); 175int pf_addr_setup(struct pf_ruleset *, 176 struct pf_addr_wrap *, sa_family_t); 177void pf_addr_copyout(struct pf_addr_wrap *); 178 179#define TAGID_MAX 50000 180 181#ifdef __FreeBSD__ 182VNET_DEFINE(struct pf_rule, pf_default_rule); 183VNET_DEFINE(struct sx, pf_consistency_lock); 184 185#ifdef ALTQ 186static VNET_DEFINE(int, pf_altq_running); 187#define V_pf_altq_running VNET(pf_altq_running) 188#endif 189 190TAILQ_HEAD(pf_tags, pf_tagname); 191 192#define V_pf_tags VNET(pf_tags) 193VNET_DEFINE(struct pf_tags, pf_tags); 194#define V_pf_qids VNET(pf_qids) 195VNET_DEFINE(struct pf_tags, pf_qids); 196 197#else /* !__FreeBSD__ */ 198struct pf_rule pf_default_rule; 199struct 
rwlock pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk"); 200#ifdef ALTQ 201static int pf_altq_running; 202#endif 203 204TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 205 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 206#endif /* __FreeBSD__ */ 207 208#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 209#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 210#endif 211 212u_int16_t tagname2tag(struct pf_tags *, char *); 213void tag2tagname(struct pf_tags *, u_int16_t, char *); 214void tag_unref(struct pf_tags *, u_int16_t); 215int pf_rtlabel_add(struct pf_addr_wrap *); 216void pf_rtlabel_remove(struct pf_addr_wrap *); 217void pf_rtlabel_copyout(struct pf_addr_wrap *); 218 219#ifdef __FreeBSD__ 220#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 221#else 222#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 223#endif 224 225#ifdef __FreeBSD__ 226struct cdev *pf_dev; 227 228/* 229 * XXX - These are new and need to be checked when moveing to a new version 230 */ 231static void pf_clear_states(void); 232static int pf_clear_tables(void); 233static void pf_clear_srcnodes(void); 234/* 235 * XXX - These are new and need to be checked when moveing to a new version 236 */ 237 238/* 239 * Wrapper functions for pfil(9) hooks 240 */ 241#ifdef INET 242static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 243 int dir, struct inpcb *inp); 244static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 245 int dir, struct inpcb *inp); 246#endif 247#ifdef INET6 248static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 249 int dir, struct inpcb *inp); 250static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, 251 int dir, struct inpcb *inp); 252#endif 253 254static int hook_pf(void); 255static int dehook_pf(void); 256static int shutdown_pf(void); 257static int pf_load(void); 258static int pf_unload(void); 259 260static struct cdevsw pf_cdevsw = { 261 .d_ioctl = pfioctl, 262 .d_name = 
PF_NAME, 263 .d_version = D_VERSION, 264}; 265 266static volatile VNET_DEFINE(int, pf_pfil_hooked); 267#define V_pf_pfil_hooked VNET(pf_pfil_hooked) 268VNET_DEFINE(int, pf_end_threads); 269VNET_DEFINE(struct mtx, pf_task_mtx); 270 271/* pfsync */ 272pfsync_state_import_t *pfsync_state_import_ptr = NULL; 273pfsync_insert_state_t *pfsync_insert_state_ptr = NULL; 274pfsync_update_state_t *pfsync_update_state_ptr = NULL; 275pfsync_delete_state_t *pfsync_delete_state_ptr = NULL; 276pfsync_clear_states_t *pfsync_clear_states_ptr = NULL; 277pfsync_state_in_use_t *pfsync_state_in_use_ptr = NULL; 278pfsync_defer_t *pfsync_defer_ptr = NULL; 279pfsync_up_t *pfsync_up_ptr = NULL; 280/* pflow */ 281export_pflow_t *export_pflow_ptr = NULL; 282/* pflog */ 283pflog_packet_t *pflog_packet_ptr = NULL; 284 285VNET_DEFINE(int, debug_pfugidhack); 286SYSCTL_VNET_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, 287 &VNET_NAME(debug_pfugidhack), 0, 288 "Enable/disable pf user/group rules mpsafe hack"); 289 290void 291init_pf_mutex(void) 292{ 293 294 mtx_init(&V_pf_task_mtx, "pf task mtx", NULL, MTX_DEF); 295} 296 297void 298destroy_pf_mutex(void) 299{ 300 301 mtx_destroy(&V_pf_task_mtx); 302} 303void 304init_zone_var(void) 305{ 306 V_pf_src_tree_pl = V_pf_rule_pl = NULL; 307 V_pf_state_pl = V_pf_state_key_pl = V_pf_state_item_pl = NULL; 308 V_pf_altq_pl = V_pf_pooladdr_pl = NULL; 309 V_pf_frent_pl = V_pf_frag_pl = V_pf_cache_pl = V_pf_cent_pl = NULL; 310 V_pf_state_scrub_pl = NULL; 311 V_pfr_ktable_pl = V_pfr_kentry_pl = NULL; 312} 313 314void 315cleanup_pf_zone(void) 316{ 317 UMA_DESTROY(V_pf_src_tree_pl); 318 UMA_DESTROY(V_pf_rule_pl); 319 UMA_DESTROY(V_pf_state_pl); 320 UMA_DESTROY(V_pf_state_key_pl); 321 UMA_DESTROY(V_pf_state_item_pl); 322 UMA_DESTROY(V_pf_altq_pl); 323 UMA_DESTROY(V_pf_pooladdr_pl); 324 UMA_DESTROY(V_pf_frent_pl); 325 UMA_DESTROY(V_pf_frag_pl); 326 UMA_DESTROY(V_pf_cache_pl); 327 UMA_DESTROY(V_pf_cent_pl); 328 UMA_DESTROY(V_pfr_ktable_pl); 329 
UMA_DESTROY(V_pfr_kentry_pl); 330 UMA_DESTROY(V_pf_state_scrub_pl); 331 UMA_DESTROY(V_pfi_addr_pl); 332} 333 334int 335pfattach(void) 336{ 337 u_int32_t *my_timeout = V_pf_default_rule.timeout; 338 int error = 1; 339 340 do { 341 UMA_CREATE(V_pf_src_tree_pl, struct pf_src_node, "pfsrctrpl"); 342 UMA_CREATE(V_pf_rule_pl, struct pf_rule, "pfrulepl"); 343 UMA_CREATE(V_pf_state_pl, struct pf_state, "pfstatepl"); 344 UMA_CREATE(V_pf_state_key_pl, struct pf_state, "pfstatekeypl"); 345 UMA_CREATE(V_pf_state_item_pl, struct pf_state, "pfstateitempl"); 346 UMA_CREATE(V_pf_altq_pl, struct pf_altq, "pfaltqpl"); 347 UMA_CREATE(V_pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl"); 348 UMA_CREATE(V_pfr_ktable_pl, struct pfr_ktable, "pfrktable"); 349 UMA_CREATE(V_pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); 350 UMA_CREATE(V_pf_frent_pl, struct pf_frent, "pffrent"); 351 UMA_CREATE(V_pf_frag_pl, struct pf_fragment, "pffrag"); 352 UMA_CREATE(V_pf_cache_pl, struct pf_fragment, "pffrcache"); 353 UMA_CREATE(V_pf_cent_pl, struct pf_frcache, "pffrcent"); 354 UMA_CREATE(V_pf_state_scrub_pl, struct pf_state_scrub, 355 "pfstatescrub"); 356 UMA_CREATE(V_pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); 357 error = 0; 358 } while(0); 359 if (error) { 360 cleanup_pf_zone(); 361 return (error); 362 } 363 pfr_initialize(); 364 pfi_initialize(); 365 if ( (error = pf_osfp_initialize()) ) { 366 cleanup_pf_zone(); 367 pf_osfp_cleanup(); 368 return (error); 369 } 370 371 V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_pl; 372 V_pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 373 V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_pl; 374 V_pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 375 V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl; 376 V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 377 V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_pl; 378 V_pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT; 379 V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = 
V_pfr_kentry_pl; 380 V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 381 uma_zone_set_max(V_pf_pool_limits[PF_LIMIT_STATES].pp, 382 V_pf_pool_limits[PF_LIMIT_STATES].limit); 383 384 RB_INIT(&V_tree_src_tracking); 385 RB_INIT(&V_pf_anchors); 386 pf_init_ruleset(&pf_main_ruleset); 387 388 TAILQ_INIT(&V_pf_altqs[0]); 389 TAILQ_INIT(&V_pf_altqs[1]); 390 TAILQ_INIT(&V_pf_pabuf); 391 V_pf_altqs_active = &V_pf_altqs[0]; 392 V_pf_altqs_inactive = &V_pf_altqs[1]; 393 TAILQ_INIT(&V_state_list); 394 395 /* default rule should never be garbage collected */ 396 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; 397 V_pf_default_rule.action = PF_PASS; 398 V_pf_default_rule.nr = -1; 399 V_pf_default_rule.rtableid = -1; 400 401 /* initialize default timeouts */ 402 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 403 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 404 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 405 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 406 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 407 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 408 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 409 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 410 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 411 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 412 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 413 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 414 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 415 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 416 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 417 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 418 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 419 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 420 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 421 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 422 423 
pf_normalize_init(); 424 425 bzero(&V_pf_status, sizeof(V_pf_status)); 426 V_pf_status.debug = PF_DEBUG_URGENT; 427 428 V_pf_pfil_hooked = 0; 429 430 /* XXX do our best to avoid a conflict */ 431 V_pf_status.hostid = arc4random(); 432 433 if (kproc_create(pf_purge_thread, curvnet, NULL, 0, 0, "pfpurge")) 434 return (ENXIO); 435 436 m_addr_chg_pf_p = pf_pkt_addr_changed; 437 438 return (error); 439} 440#else /* !__FreeBSD__ */ 441 442void 443pfattach(int num) 444{ 445 u_int32_t *timeout = pf_default_rule.timeout; 446 447 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl", 448 &pool_allocator_nointr); 449 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0, 450 "pfsrctrpl", NULL); 451 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", 452 NULL); 453 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0, 454 "pfstatekeypl", NULL); 455 pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0, 456 "pfstateitempl", NULL); 457 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl", 458 &pool_allocator_nointr); 459 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0, 460 "pfpooladdrpl", &pool_allocator_nointr); 461 pfr_initialize(); 462 pfi_initialize(); 463 pf_osfp_initialize(); 464 465 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 466 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 467 468 if (physmem <= atop(100*1024*1024)) 469 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = 470 PFR_KENTRY_HIWAT_SMALL; 471 472 RB_INIT(&tree_src_tracking); 473 RB_INIT(&pf_anchors); 474 pf_init_ruleset(&pf_main_ruleset); 475 TAILQ_INIT(&pf_altqs[0]); 476 TAILQ_INIT(&pf_altqs[1]); 477 TAILQ_INIT(&pf_pabuf); 478 pf_altqs_active = &pf_altqs[0]; 479 pf_altqs_inactive = &pf_altqs[1]; 480 TAILQ_INIT(&state_list); 481 482 /* default rule should never be garbage collected */ 483 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 484 pf_default_rule.action = PF_PASS; 485 
pf_default_rule.nr = -1; 486 pf_default_rule.rtableid = -1; 487 488 /* initialize default timeouts */ 489 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 490 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 491 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 492 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 493 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 494 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 495 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 496 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 497 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 498 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 499 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 500 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 501 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 502 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 503 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 504 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 505 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 506 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 507 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 508 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 509 510 pf_normalize_init(); 511 bzero(&pf_status, sizeof(pf_status)); 512 pf_status.debug = PF_DEBUG_URGENT; 513 514 /* XXX do our best to avoid a conflict */ 515 pf_status.hostid = arc4random(); 516 517 /* require process context to purge states, so perform in a thread */ 518 kthread_create_deferred(pf_thread_create, NULL); 519} 520 521void 522pf_thread_create(void *v) 523{ 524 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge")) 525 panic("pfpurge thread"); 526} 527 528int 529pfopen(dev_t dev, int flags, int fmt, struct proc *p) 530{ 531 if (minor(dev) >= 1) 532 return (ENXIO); 533 return (0); 534} 535 536int 537pfclose(dev_t dev, int flags, int fmt, struct proc *p) 538{ 539 if (minor(dev) >= 1) 540 return (ENXIO); 541 return (0); 542} 543#endif 544 545struct 
pf_pool *
/*
 * Look up the address pool of a rule identified by (anchor, action,
 * rule number) in either the active or inactive ruleset.  If
 * check_ticket is set, the caller's ticket must match the ruleset's.
 * If r_last is set the last rule in the queue is used regardless of
 * rule_number.  Returns NULL when no matching rule exists.
 */
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/* Move every pool address from poola to the tail of poolb. */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/* Drop all references held by pool addresses in poola and free them. */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
#ifdef __FreeBSD__
		pool_put(&V_pf_pooladdr_pl, empty_pool_pa);
#else
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
#endif
	}
}

/*
 * Unlink a rule from rulequeue (when non-NULL) and free it once no
 * states or source nodes reference it.  A rule still referenced is
 * left allocated (tqe_prev == NULL marks it as unlinked) and will be
 * reaped later when its last state goes away.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
#ifdef __FreeBSD__
	pool_put(&V_pf_rule_pl, rule);
#else
	pool_put(&pf_rule_pl, rule);
#endif
}

/*
 * Map a tag name to a numeric tag id, allocating a new id if the name
 * is unknown.  Existing tags get their refcount bumped.  Returns 0 on
 * failure (id space exhausted or allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/* Copy the name of tag id tagid into p (silently a no-op if unknown). */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* Drop one reference on tag; free the entry when the count hits zero. */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

/* Public wrapper: look up/allocate a packet tag id by name. */
u_int16_t
pf_tagname2tag(char *tagname)
{
#ifdef __FreeBSD__
	return (tagname2tag(&V_pf_tags, tagname));
#else
	return (tagname2tag(&pf_tags, tagname));
#endif
}

/* Public wrapper: resolve a packet tag id back to its name. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
#ifdef __FreeBSD__
	tag2tagname(&V_pf_tags, tagid, p);
#else
	tag2tagname(&pf_tags, tagid, p);
#endif
}

/* Take an additional reference on an existing packet tag id. */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

#ifdef __FreeBSD__
	TAILQ_FOREACH(t, &V_pf_tags, entries)
#else
	TAILQ_FOREACH(t, &pf_tags, entries)
#endif
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
/* Release one reference on a packet tag id. */
pf_tag_unref(u_int16_t tag)
{
#ifdef __FreeBSD__
	tag_unref(&V_pf_tags, tag);
#else
	tag_unref(&pf_tags, tag);
#endif
}

/* Resolve a route-label name to an id (not implemented on FreeBSD). */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	return (0);
#else
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
#endif
}

/* Drop a route-label reference (no-op on FreeBSD). */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
#else
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}

/* Fill in the textual route-label name for copyout to userland. */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __FreeBSD__
	/* XXX_IMPORT: later */
	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
		strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
#else
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}

#ifdef ALTQ
/* Queue ids share the tag-id allocator (qids live in their own list). */
u_int32_t
pf_qname2qid(char *qname)
{
#ifdef __FreeBSD__
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
#else
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
#endif
}

/* Resolve a queue id back to its name. */
void
pf_qid2qname(u_int32_t qid, char *p)
{
#ifdef __FreeBSD__
	tag2tagname(&V_pf_qids, (u_int16_t)qid, p);
#else
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
#endif
}

/* Release one reference on a queue id. */
void
pf_qid_unref(u_int32_t qid)
{
#ifdef __FreeBSD__
	tag_unref(&V_pf_qids, (u_int16_t)qid);
#else
	tag_unref(&pf_qids, (u_int16_t)qid);
#endif
}

/*
 * Start an ALTQ transaction: purge any stale inactive list and hand the
 * caller a fresh inactive ticket.  Note the #ifdef branches share the
 * loop body, so braces are intentionally unbalanced within each branch.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
#ifdef __FreeBSD__
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
	if (error)
		return (error);
#ifdef __FreeBSD__
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
#else
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
#endif
	return (0);
}

/*
 * Abort an ALTQ transaction: discard the inactive list if the caller's
 * ticket is still current, otherwise do nothing.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

#ifdef __FreeBSD__
	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
#ifdef __FreeBSD__
	V_altqs_inactive_open = 0;
#else
	altqs_inactive_open = 0;
#endif
	return (error);
}

/*
 * Commit an ALTQ transaction: swap active/inactive queue lists at
 * splsoftnet, attach (and, if ALTQ is running, enable) the new
 * disciplines, then detach and free the now-old set.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

#ifdef __FreeBSD__
	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
#else
	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
#endif
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
#ifdef __FreeBSD__
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;
#else
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;
#endif

	/* Attach new disciplines */
#ifdef __FreeBSD__
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
#endif
			/* attach the discipline */
			error = altq_pfattach(altq);
#ifdef __FreeBSD__
			if (error == 0 && V_pf_altq_running)
#else
			if (error == 0 && pf_altq_running)
#endif
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
#ifdef __FreeBSD__
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
#else
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
#endif
			/* detach and destroy the discipline */
#ifdef __FreeBSD__
			if (V_pf_altq_running)
#else
			if (pf_altq_running)
#endif
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
#ifdef __FreeBSD__
		pool_put(&V_pf_altq_pl, altq);
#else
		pool_put(&pf_altq_pl, altq);
#endif
	}
	splx(s);

#ifdef __FreeBSD__
	V_altqs_inactive_open = 0;
#else
	altqs_inactive_open = 0;
#endif
	return (error);
}

/*
 * Enable ALTQ on the queue's interface and install its token-bucket
 * regulator.  On FreeBSD the pf lock is dropped around tbr_set()
 * because it may sleep.
 */
int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	return (error);
}

/*
 * Disable ALTQ on the queue's interface and clear the token-bucket
 * regulator, unless the discipline was already replaced by a new one.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
1083 */ 1084 if (altq->altq_disc != ifp->if_snd.altq_disc) 1085 return (0); 1086 1087 error = altq_disable(&ifp->if_snd); 1088 1089 if (error == 0) { 1090 /* clear tokenbucket regulator */ 1091 tb.rate = 0; 1092 s = splnet(); 1093#ifdef __FreeBSD__ 1094 PF_UNLOCK(); 1095#endif 1096 error = tbr_set(&ifp->if_snd, &tb); 1097#ifdef __FreeBSD__ 1098 PF_LOCK(); 1099#endif 1100 splx(s); 1101 } 1102 1103 return (error); 1104} 1105 1106#ifdef __FreeBSD__ 1107void 1108pf_altq_ifnet_event(struct ifnet *ifp, int remove) 1109{ 1110 struct ifnet *ifp1; 1111 struct pf_altq *a1, *a2, *a3; 1112 u_int32_t ticket; 1113 int error = 0; 1114 1115 /* Interrupt userland queue modifications */ 1116#ifdef __FreeBSD__ 1117 if (V_altqs_inactive_open) 1118 pf_rollback_altq(V_ticket_altqs_inactive); 1119#else 1120 if (altqs_inactive_open) 1121 pf_rollback_altq(ticket_altqs_inactive); 1122#endif 1123 1124 /* Start new altq ruleset */ 1125 if (pf_begin_altq(&ticket)) 1126 return; 1127 1128 /* Copy the current active set */ 1129#ifdef __FreeBSD__ 1130 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1131 a2 = pool_get(&V_pf_altq_pl, PR_NOWAIT); 1132#else 1133 TAILQ_FOREACH(a1, pf_altqs_active, entries) { 1134 a2 = pool_get(&pf_altq_pl, PR_NOWAIT); 1135#endif 1136 if (a2 == NULL) { 1137 error = ENOMEM; 1138 break; 1139 } 1140 bcopy(a1, a2, sizeof(struct pf_altq)); 1141 1142 if (a2->qname[0] != 0) { 1143 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1144 error = EBUSY; 1145#ifdef __FreeBSD__ 1146 pool_put(&V_pf_altq_pl, a2); 1147#else 1148 pool_put(&pf_altq_pl, a2); 1149#endif 1150 break; 1151 } 1152 a2->altq_disc = NULL; 1153#ifdef __FreeBSD__ 1154 TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) { 1155#else 1156 TAILQ_FOREACH(a3, pf_altqs_inactive, entries) { 1157#endif 1158 if (strncmp(a3->ifname, a2->ifname, 1159 IFNAMSIZ) == 0 && a3->qname[0] == 0) { 1160 a2->altq_disc = a3->altq_disc; 1161 break; 1162 } 1163 } 1164 } 1165 /* Deactivate the interface in question */ 1166 a2->local_flags &= 
		    ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			PF_UNLOCK();
			error = altq_add(a2);
			PF_LOCK();

			/*
			 * The pf lock was dropped around altq_add(); if the
			 * inactive ticket changed meanwhile, give up (EBUSY).
			 */
#ifdef __FreeBSD__
			if (ticket != V_ticket_altqs_inactive)
#else
			if (ticket != ticket_altqs_inactive)
#endif
				error = EBUSY;

			if (error) {
#ifdef __FreeBSD__
				pool_put(&V_pf_altq_pl, a2);
#else
				pool_put(&pf_altq_pl, a2);
#endif
				break;
			}
		}

#ifdef __FreeBSD__
		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
#else
		TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
#endif
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif
#endif /* ALTQ */

/*
 * Open a transaction on the inactive ruleset of type rs_num under the
 * given anchor: flush any leftover inactive rules, bump the inactive
 * ticket and return it through *ticket.  Returns EINVAL on a bad
 * rs_num or if the ruleset cannot be found/created.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort an open transaction: drop all rules loaded into the inactive
 * ruleset identified by ticket/rs_num/anchor.  A stale ticket or a
 * ruleset that is not open is silently ignored (returns 0).
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Helpers that feed rule fields into an MD5 context (variable "ctx" in
 * the caller's scope) for the ruleset checksum.  The HTONL/HTONS
 * variants hash the value in network byte order via caller-supplied
 * scratch storage so the digest is endian-independent.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/*
 * Mix the identifying fields of one rule address (type-specific value,
 * ports, negation and port operator) into the checksum context.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * Mix all checksum-relevant fields of a rule into the MD5 context;
 * together with pf_setup_pfsync_matching() this produces the ruleset
 * checksum stored in pf_status.pf_chksum.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;	/* scratch for PF_MD5_UPD_HTONS */
	u_int32_t	y;	/* scratch for PF_MD5_UPD_HTONL */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup?
 */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit an open transaction: validate the ticket, recompute the pfsync
 * checksum when committing the main ruleset, then swap the inactive
 * ruleset into the active slot and purge the rules that were active
 * before.  Returns EINVAL on a bad rs_num, EBUSY on a ticket mismatch
 * or unopened ruleset, or pf_setup_pfsync_matching()'s error.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old.
 */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list (now sitting in the inactive slot). */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

/*
 * Hash every rule of each (non-scrub) inactive ruleset into one MD5
 * digest for pfsync matching, rebuilding the per-ruleset rule-pointer
 * arrays (indexed by rule number) along the way.  The digest is copied
 * into pf_status.pf_chksum.  Returns ENOMEM if an array allocation
 * fails, 0 otherwise.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well?
		 */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
#ifdef __FreeBSD__
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
#else
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
#endif
	return (0);
}

/*
 * Resolve the dynamic-interface and table components of a rule address.
 * Returns EINVAL if either setup step fails, 0 on success.
 */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr))
		return (EINVAL);

	return (0);
}

/*
 * Convert the kernel-internal representation of a rule address back to
 * its user-visible form before copying a rule out to userland.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

/*
 * ioctl entry point for /dev/pf.  Commands are first screened against
 * the securelevel (read-only/dummy operations are allowed, everything
 * else is EPERM) and the device open mode (FWRITE), then dispatched in
 * the big switch below under the pf consistency lock.
 */
int
#ifdef __FreeBSD__
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
#else
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
#endif
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
#ifndef __FreeBSD__
	int			 s;
#endif
	int			 error = 0;

	CURVNET_SET(TD_TO_VNET(td));

	/* XXX keep in sync with switch() below */
#ifdef __FreeBSD__
	if (securelevel_gt(td->td_ucred, 2))
#else
	if (securelevel > 1)
#endif
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case
DIOCGETADDR: 1477 case DIOCGETSTATE: 1478 case DIOCSETSTATUSIF: 1479 case DIOCGETSTATUS: 1480 case DIOCCLRSTATUS: 1481 case DIOCNATLOOK: 1482 case DIOCSETDEBUG: 1483 case DIOCGETSTATES: 1484 case DIOCGETTIMEOUT: 1485 case DIOCCLRRULECTRS: 1486 case DIOCGETLIMIT: 1487 case DIOCGETALTQS: 1488 case DIOCGETALTQ: 1489 case DIOCGETQSTATS: 1490 case DIOCGETRULESETS: 1491 case DIOCGETRULESET: 1492 case DIOCRGETTABLES: 1493 case DIOCRGETTSTATS: 1494 case DIOCRCLRTSTATS: 1495 case DIOCRCLRADDRS: 1496 case DIOCRADDADDRS: 1497 case DIOCRDELADDRS: 1498 case DIOCRSETADDRS: 1499 case DIOCRGETADDRS: 1500 case DIOCRGETASTATS: 1501 case DIOCRCLRASTATS: 1502 case DIOCRTSTADDRS: 1503 case DIOCOSFPGET: 1504 case DIOCGETSRCNODES: 1505 case DIOCCLRSRCNODES: 1506 case DIOCIGETIFACES: 1507#ifdef __FreeBSD__ 1508 case DIOCGIFSPEED: 1509#endif 1510 case DIOCSETIFFLAG: 1511 case DIOCCLRIFFLAG: 1512 break; 1513 case DIOCRCLRTABLES: 1514 case DIOCRADDTABLES: 1515 case DIOCRDELTABLES: 1516 case DIOCRSETTFLAGS: 1517 if (((struct pfioc_table *)addr)->pfrio_flags & 1518 PFR_FLAG_DUMMY) 1519 break; /* dummy operation ok */ 1520 return (EPERM); 1521 default: 1522 return (EPERM); 1523 } 1524 1525 if (!(flags & FWRITE)) 1526 switch (cmd) { 1527 case DIOCGETRULES: 1528 case DIOCGETADDRS: 1529 case DIOCGETADDR: 1530 case DIOCGETSTATE: 1531 case DIOCGETSTATUS: 1532 case DIOCGETSTATES: 1533 case DIOCGETTIMEOUT: 1534 case DIOCGETLIMIT: 1535 case DIOCGETALTQS: 1536 case DIOCGETALTQ: 1537 case DIOCGETQSTATS: 1538 case DIOCGETRULESETS: 1539 case DIOCGETRULESET: 1540 case DIOCNATLOOK: 1541 case DIOCRGETTABLES: 1542 case DIOCRGETTSTATS: 1543 case DIOCRGETADDRS: 1544 case DIOCRGETASTATS: 1545 case DIOCRTSTADDRS: 1546 case DIOCOSFPGET: 1547 case DIOCGETSRCNODES: 1548 case DIOCIGETIFACES: 1549#ifdef __FreeBSD__ 1550 case DIOCGIFSPEED: 1551#endif 1552 break; 1553 case DIOCRCLRTABLES: 1554 case DIOCRADDTABLES: 1555 case DIOCRDELTABLES: 1556 case DIOCRCLRTSTATS: 1557 case DIOCRCLRADDRS: 1558 case DIOCRADDADDRS: 1559 
case DIOCRDELADDRS: 1560 case DIOCRSETADDRS: 1561 case DIOCRSETTFLAGS: 1562 if (((struct pfioc_table *)addr)->pfrio_flags & 1563 PFR_FLAG_DUMMY) { 1564 flags |= FWRITE; /* need write lock for dummy */ 1565 break; /* dummy operation ok */ 1566 } 1567 return (EACCES); 1568 case DIOCGETRULE: 1569 if (((struct pfioc_rule *)addr)->action == 1570 PF_GET_CLR_CNTR) 1571 return (EACCES); 1572 break; 1573 default: 1574 return (EACCES); 1575 } 1576 1577 if (flags & FWRITE) 1578#ifdef __FreeBSD__ 1579 sx_xlock(&V_pf_consistency_lock); 1580 else 1581 sx_slock(&V_pf_consistency_lock); 1582#else 1583 rw_enter_write(&pf_consistency_lock); 1584 else 1585 rw_enter_read(&pf_consistency_lock); 1586#endif 1587 1588#ifdef __FreeBSD__ 1589 PF_LOCK(); 1590#else 1591 s = splsoftnet(); 1592#endif 1593 switch (cmd) { 1594 1595 case DIOCSTART: 1596#ifdef __FreeBSD__ 1597 if (V_pf_status.running) 1598#else 1599 if (pf_status.running) 1600#endif 1601 error = EEXIST; 1602 else { 1603#ifdef __FreeBSD__ 1604 PF_UNLOCK(); 1605 error = hook_pf(); 1606 PF_LOCK(); 1607 if (error) { 1608 DPFPRINTF(PF_DEBUG_MISC, 1609 ("pf: pfil registeration fail\n")); 1610 break; 1611 } 1612 V_pf_status.running = 1; 1613 V_pf_status.since = time_second; 1614 1615 if (V_pf_status.stateid == 0) { 1616 V_pf_status.stateid = time_second; 1617 V_pf_status.stateid = V_pf_status.stateid << 32; 1618 } 1619#else 1620 pf_status.running = 1; 1621 pf_status.since = time_second; 1622 1623 if (pf_status.stateid == 0) { 1624 pf_status.stateid = time_second; 1625 pf_status.stateid = pf_status.stateid << 32; 1626 } 1627#endif 1628 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1629 } 1630 break; 1631 1632 case DIOCSTOP: 1633#ifdef __FreeBSD__ 1634 if (!V_pf_status.running) 1635 error = ENOENT; 1636 else { 1637 V_pf_status.running = 0; 1638 PF_UNLOCK(); 1639 error = dehook_pf(); 1640 PF_LOCK(); 1641 if (error) { 1642 V_pf_status.running = 1; 1643 DPFPRINTF(PF_DEBUG_MISC, 1644 ("pf: pfil unregisteration failed\n")); 1645 } 1646 
V_pf_status.since = time_second; 1647#else 1648 if (!pf_status.running) 1649 error = ENOENT; 1650 else { 1651 pf_status.running = 0; 1652 pf_status.since = time_second; 1653#endif 1654 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1655 } 1656 break; 1657 1658 case DIOCADDRULE: { 1659 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1660 struct pf_ruleset *ruleset; 1661 struct pf_rule *rule, *tail; 1662 struct pf_pooladdr *pa; 1663 int rs_num; 1664 1665 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1666 ruleset = pf_find_ruleset(pr->anchor); 1667 if (ruleset == NULL) { 1668 error = EINVAL; 1669 break; 1670 } 1671 rs_num = pf_get_ruleset_number(pr->rule.action); 1672 if (rs_num >= PF_RULESET_MAX) { 1673 error = EINVAL; 1674 break; 1675 } 1676 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1677 error = EINVAL; 1678 break; 1679 } 1680 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1681#ifdef __FreeBSD__ 1682 DPFPRINTF(PF_DEBUG_MISC, 1683 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1684 ruleset->rules[rs_num].inactive.ticket)); 1685#endif 1686 error = EBUSY; 1687 break; 1688 } 1689#ifdef __FreeBSD__ 1690 if (pr->pool_ticket != V_ticket_pabuf) { 1691 DPFPRINTF(PF_DEBUG_MISC, 1692 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1693 V_ticket_pabuf)); 1694#else 1695 if (pr->pool_ticket != ticket_pabuf) { 1696#endif 1697 error = EBUSY; 1698 break; 1699 } 1700#ifdef __FreeBSD__ 1701 rule = pool_get(&V_pf_rule_pl, PR_NOWAIT); 1702#else 1703 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); 1704#endif 1705 if (rule == NULL) { 1706 error = ENOMEM; 1707 break; 1708 } 1709 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1710#ifdef __FreeBSD__ 1711 rule->cuid = td->td_ucred->cr_ruid; 1712 rule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 1713#else 1714 rule->cuid = p->p_cred->p_ruid; 1715 rule->cpid = p->p_pid; 1716#endif 1717 rule->anchor = NULL; 1718 rule->kif = NULL; 1719 TAILQ_INIT(&rule->rpool.list); 1720 /* initialize refcounting */ 1721 rule->states_cur = 0; 1722 rule->src_nodes = 0; 1723 rule->entries.tqe_prev = NULL; 1724#ifndef INET 1725 if (rule->af == AF_INET) { 1726#ifdef __FreeBSD__ 1727 pool_put(&V_pf_rule_pl, rule); 1728#else 1729 pool_put(&pf_rule_pl, rule); 1730#endif 1731 error = EAFNOSUPPORT; 1732 break; 1733 } 1734#endif /* INET */ 1735#ifndef INET6 1736 if (rule->af == AF_INET6) { 1737#ifdef __FreeBSD__ 1738 pool_put(&V_pf_rule_pl, rule); 1739#else 1740 pool_put(&pf_rule_pl, rule); 1741#endif 1742 error = EAFNOSUPPORT; 1743 break; 1744 } 1745#endif /* INET6 */ 1746 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1747 pf_rulequeue); 1748 if (tail) 1749 rule->nr = tail->nr + 1; 1750 else 1751 rule->nr = 0; 1752 if (rule->ifname[0]) { 1753 rule->kif = pfi_kif_get(rule->ifname); 1754 if (rule->kif == NULL) { 1755#ifdef __FreeBSD__ 1756 pool_put(&V_pf_rule_pl, rule); 1757#else 1758 pool_put(&pf_rule_pl, rule); 1759#endif 1760 error = EINVAL; 1761 break; 1762 } 1763 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1764 } 1765 1766#ifdef __FreeBSD__ /* ROUTING */ 1767 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs) 1768#else 1769 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid)) 1770#endif 1771 error = EBUSY; 1772 1773#ifdef ALTQ 1774 /* set queue IDs */ 1775 if (rule->qname[0] != 0) { 1776 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1777 error = EBUSY; 1778 else if (rule->pqname[0] != 0) { 1779 if ((rule->pqid = 1780 pf_qname2qid(rule->pqname)) == 0) 1781 error = EBUSY; 1782 } else 1783 rule->pqid = rule->qid; 1784 } 1785#endif 1786 if (rule->tagname[0]) 1787 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1788 error = EBUSY; 1789 if (rule->match_tagname[0]) 1790 if ((rule->match_tag = 1791 pf_tagname2tag(rule->match_tagname)) == 0) 
1792 error = EBUSY; 1793 if (rule->rt && !rule->direction) 1794 error = EINVAL; 1795#if NPFLOG > 0 1796 if (!rule->log) 1797 rule->logif = 0; 1798 if (rule->logif >= PFLOGIFS_MAX) 1799 error = EINVAL; 1800#endif 1801 if (pf_rtlabel_add(&rule->src.addr) || 1802 pf_rtlabel_add(&rule->dst.addr)) 1803 error = EBUSY; 1804 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1805 error = EINVAL; 1806 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1807 error = EINVAL; 1808 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1809 error = EINVAL; 1810#ifdef __FreeBSD__ 1811 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 1812#else 1813 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1814#endif 1815 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1816 error = EINVAL; 1817 1818 if (rule->overload_tblname[0]) { 1819 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1820 rule->overload_tblname, 0)) == NULL) 1821 error = EINVAL; 1822 else 1823 rule->overload_tbl->pfrkt_flags |= 1824 PFR_TFLAG_ACTIVE; 1825 } 1826 1827#ifdef __FreeBSD__ 1828 pf_mv_pool(&V_pf_pabuf, &rule->rpool.list); 1829#else 1830 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1831#endif 1832 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1833 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1834 (rule->rt > PF_FASTROUTE)) && 1835 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1836 error = EINVAL; 1837 1838 if (error) { 1839 pf_rm_rule(NULL, rule); 1840 break; 1841 } 1842 1843#ifdef __FreeBSD__ 1844 if (!V_debug_pfugidhack && (rule->uid.op || rule->gid.op || 1845 rule->log & PF_LOG_SOCKET_LOOKUP)) { 1846 DPFPRINTF(PF_DEBUG_MISC, 1847 ("pf: debug.pfugidhack enabled\n")); 1848 V_debug_pfugidhack = 1; 1849 } 1850#endif 1851 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1852 rule->evaluations = rule->packets[0] = rule->packets[1] = 1853 rule->bytes[0] = rule->bytes[1] = 0; 1854 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1855 rule, entries); 1856 ruleset->rules[rs_num].inactive.rcount++; 1857 break; 
1858 } 1859 1860 case DIOCGETRULES: { 1861 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1862 struct pf_ruleset *ruleset; 1863 struct pf_rule *tail; 1864 int rs_num; 1865 1866 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1867 ruleset = pf_find_ruleset(pr->anchor); 1868 if (ruleset == NULL) { 1869 error = EINVAL; 1870 break; 1871 } 1872 rs_num = pf_get_ruleset_number(pr->rule.action); 1873 if (rs_num >= PF_RULESET_MAX) { 1874 error = EINVAL; 1875 break; 1876 } 1877 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1878 pf_rulequeue); 1879 if (tail) 1880 pr->nr = tail->nr + 1; 1881 else 1882 pr->nr = 0; 1883 pr->ticket = ruleset->rules[rs_num].active.ticket; 1884 break; 1885 } 1886 1887 case DIOCGETRULE: { 1888 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1889 struct pf_ruleset *ruleset; 1890 struct pf_rule *rule; 1891 int rs_num, i; 1892 1893 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1894 ruleset = pf_find_ruleset(pr->anchor); 1895 if (ruleset == NULL) { 1896 error = EINVAL; 1897 break; 1898 } 1899 rs_num = pf_get_ruleset_number(pr->rule.action); 1900 if (rs_num >= PF_RULESET_MAX) { 1901 error = EINVAL; 1902 break; 1903 } 1904 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1905 error = EBUSY; 1906 break; 1907 } 1908 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1909 while ((rule != NULL) && (rule->nr != pr->nr)) 1910 rule = TAILQ_NEXT(rule, entries); 1911 if (rule == NULL) { 1912 error = EBUSY; 1913 break; 1914 } 1915 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1916 if (pf_anchor_copyout(ruleset, rule, pr)) { 1917 error = EBUSY; 1918 break; 1919 } 1920 pf_addr_copyout(&pr->rule.src.addr); 1921 pf_addr_copyout(&pr->rule.dst.addr); 1922 for (i = 0; i < PF_SKIP_COUNT; ++i) 1923 if (rule->skip[i].ptr == NULL) 1924 pr->rule.skip[i].nr = -1; 1925 else 1926 pr->rule.skip[i].nr = 1927 rule->skip[i].ptr->nr; 1928 1929 if (pr->action == PF_GET_CLR_CNTR) { 1930 rule->evaluations = 0; 1931 rule->packets[0] = rule->packets[1] = 0; 1932 
rule->bytes[0] = rule->bytes[1] = 0; 1933 rule->states_tot = 0; 1934 } 1935 break; 1936 } 1937 1938 case DIOCCHANGERULE: { 1939 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1940 struct pf_ruleset *ruleset; 1941 struct pf_rule *oldrule = NULL, *newrule = NULL; 1942 u_int32_t nr = 0; 1943 int rs_num; 1944 1945 if (!(pcr->action == PF_CHANGE_REMOVE || 1946 pcr->action == PF_CHANGE_GET_TICKET) && 1947#ifdef __FreeBSD__ 1948 pcr->pool_ticket != V_ticket_pabuf) { 1949#else 1950 pcr->pool_ticket != ticket_pabuf) { 1951#endif 1952 error = EBUSY; 1953 break; 1954 } 1955 1956 if (pcr->action < PF_CHANGE_ADD_HEAD || 1957 pcr->action > PF_CHANGE_GET_TICKET) { 1958 error = EINVAL; 1959 break; 1960 } 1961 ruleset = pf_find_ruleset(pcr->anchor); 1962 if (ruleset == NULL) { 1963 error = EINVAL; 1964 break; 1965 } 1966 rs_num = pf_get_ruleset_number(pcr->rule.action); 1967 if (rs_num >= PF_RULESET_MAX) { 1968 error = EINVAL; 1969 break; 1970 } 1971 1972 if (pcr->action == PF_CHANGE_GET_TICKET) { 1973 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1974 break; 1975 } else { 1976 if (pcr->ticket != 1977 ruleset->rules[rs_num].active.ticket) { 1978 error = EINVAL; 1979 break; 1980 } 1981 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1982 error = EINVAL; 1983 break; 1984 } 1985 } 1986 1987 if (pcr->action != PF_CHANGE_REMOVE) { 1988#ifdef __FreeBSD__ 1989 newrule = pool_get(&V_pf_rule_pl, PR_NOWAIT); 1990#else 1991 newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); 1992#endif 1993 if (newrule == NULL) { 1994 error = ENOMEM; 1995 break; 1996 } 1997 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1998#ifdef __FreeBSD__ 1999 newrule->cuid = td->td_ucred->cr_ruid; 2000 newrule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 2001#else 2002 newrule->cuid = p->p_cred->p_ruid; 2003 newrule->cpid = p->p_pid; 2004#endif 2005 TAILQ_INIT(&newrule->rpool.list); 2006 /* initialize refcounting */ 2007 newrule->states_cur = 0; 2008 newrule->entries.tqe_prev = NULL; 2009#ifndef INET 2010 if (newrule->af == AF_INET) { 2011#ifdef __FreeBSD__ 2012 pool_put(&V_pf_rule_pl, newrule); 2013#else 2014 pool_put(&pf_rule_pl, newrule); 2015#endif 2016 error = EAFNOSUPPORT; 2017 break; 2018 } 2019#endif /* INET */ 2020#ifndef INET6 2021 if (newrule->af == AF_INET6) { 2022#ifdef __FreeBSD__ 2023 pool_put(&V_pf_rule_pl, newrule); 2024#else 2025 pool_put(&pf_rule_pl, newrule); 2026#endif 2027 error = EAFNOSUPPORT; 2028 break; 2029 } 2030#endif /* INET6 */ 2031 if (newrule->ifname[0]) { 2032 newrule->kif = pfi_kif_get(newrule->ifname); 2033 if (newrule->kif == NULL) { 2034#ifdef __FreeBSD__ 2035 pool_put(&V_pf_rule_pl, newrule); 2036#else 2037 pool_put(&pf_rule_pl, newrule); 2038#endif 2039 error = EINVAL; 2040 break; 2041 } 2042 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 2043 } else 2044 newrule->kif = NULL; 2045 2046 if (newrule->rtableid > 0 && 2047#ifdef __FreeBSD__ /* ROUTING */ 2048 newrule->rtableid > rt_numfibs) 2049#else 2050 !rtable_exists(newrule->rtableid)) 2051#endif 2052 error = EBUSY; 2053 2054#ifdef ALTQ 2055 /* set queue IDs */ 2056 if (newrule->qname[0] != 0) { 2057 if ((newrule->qid = 2058 pf_qname2qid(newrule->qname)) == 0) 2059 error = EBUSY; 2060 else if (newrule->pqname[0] != 0) { 2061 if ((newrule->pqid = 2062 pf_qname2qid(newrule->pqname)) == 0) 2063 error = EBUSY; 2064 } else 2065 newrule->pqid = newrule->qid; 2066 } 2067#endif /* ALTQ */ 2068 if (newrule->tagname[0]) 2069 if ((newrule->tag = 2070 pf_tagname2tag(newrule->tagname)) == 0) 2071 error = EBUSY; 2072 if (newrule->match_tagname[0]) 2073 if ((newrule->match_tag = pf_tagname2tag( 2074 newrule->match_tagname)) == 0) 2075 error = EBUSY; 2076 if (newrule->rt && !newrule->direction) 2077 error = EINVAL; 
2078#if NPFLOG > 0 2079 if (!newrule->log) 2080 newrule->logif = 0; 2081 if (newrule->logif >= PFLOGIFS_MAX) 2082 error = EINVAL; 2083#endif 2084 if (pf_rtlabel_add(&newrule->src.addr) || 2085 pf_rtlabel_add(&newrule->dst.addr)) 2086 error = EBUSY; 2087 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 2088 error = EINVAL; 2089 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 2090 error = EINVAL; 2091 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 2092 error = EINVAL; 2093#ifdef __FreeBSD__ 2094 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2095#else 2096 TAILQ_FOREACH(pa, &pf_pabuf, entries) 2097#endif 2098 if (pf_tbladdr_setup(ruleset, &pa->addr)) 2099 error = EINVAL; 2100 2101 if (newrule->overload_tblname[0]) { 2102 if ((newrule->overload_tbl = pfr_attach_table( 2103 ruleset, newrule->overload_tblname, 0)) == 2104 NULL) 2105 error = EINVAL; 2106 else 2107 newrule->overload_tbl->pfrkt_flags |= 2108 PFR_TFLAG_ACTIVE; 2109 } 2110 2111#ifdef __FreeBSD__ 2112 pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list); 2113#else 2114 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 2115#endif 2116 if (((((newrule->action == PF_NAT) || 2117 (newrule->action == PF_RDR) || 2118 (newrule->action == PF_BINAT) || 2119 (newrule->rt > PF_FASTROUTE)) && 2120 !newrule->anchor)) && 2121 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 2122 error = EINVAL; 2123 2124 if (error) { 2125 pf_rm_rule(NULL, newrule); 2126 break; 2127 } 2128 2129#ifdef __FreeBSD__ 2130 if (!V_debug_pfugidhack && (newrule->uid.op || 2131 newrule->gid.op || 2132 newrule->log & PF_LOG_SOCKET_LOOKUP)) { 2133 DPFPRINTF(PF_DEBUG_MISC, 2134 ("pf: debug.pfugidhack enabled\n")); 2135 V_debug_pfugidhack = 1; 2136 } 2137#endif 2138 2139 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 2140 newrule->evaluations = 0; 2141 newrule->packets[0] = newrule->packets[1] = 0; 2142 newrule->bytes[0] = newrule->bytes[1] = 0; 2143 } 2144#ifdef __FreeBSD__ 2145 pf_empty_pool(&V_pf_pabuf); 2146#else 2147 
pf_empty_pool(&pf_pabuf); 2148#endif 2149 2150 if (pcr->action == PF_CHANGE_ADD_HEAD) 2151 oldrule = TAILQ_FIRST( 2152 ruleset->rules[rs_num].active.ptr); 2153 else if (pcr->action == PF_CHANGE_ADD_TAIL) 2154 oldrule = TAILQ_LAST( 2155 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 2156 else { 2157 oldrule = TAILQ_FIRST( 2158 ruleset->rules[rs_num].active.ptr); 2159 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 2160 oldrule = TAILQ_NEXT(oldrule, entries); 2161 if (oldrule == NULL) { 2162 if (newrule != NULL) 2163 pf_rm_rule(NULL, newrule); 2164 error = EINVAL; 2165 break; 2166 } 2167 } 2168 2169 if (pcr->action == PF_CHANGE_REMOVE) { 2170 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 2171 ruleset->rules[rs_num].active.rcount--; 2172 } else { 2173 if (oldrule == NULL) 2174 TAILQ_INSERT_TAIL( 2175 ruleset->rules[rs_num].active.ptr, 2176 newrule, entries); 2177 else if (pcr->action == PF_CHANGE_ADD_HEAD || 2178 pcr->action == PF_CHANGE_ADD_BEFORE) 2179 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 2180 else 2181 TAILQ_INSERT_AFTER( 2182 ruleset->rules[rs_num].active.ptr, 2183 oldrule, newrule, entries); 2184 ruleset->rules[rs_num].active.rcount++; 2185 } 2186 2187 nr = 0; 2188 TAILQ_FOREACH(oldrule, 2189 ruleset->rules[rs_num].active.ptr, entries) 2190 oldrule->nr = nr++; 2191 2192 ruleset->rules[rs_num].active.ticket++; 2193 2194 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 2195 pf_remove_if_empty_ruleset(ruleset); 2196 2197 break; 2198 } 2199 2200 case DIOCCLRSTATES: { 2201 struct pf_state *s, *nexts; 2202 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 2203 u_int killed = 0; 2204 2205#ifdef __FreeBSD__ 2206 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; s = nexts) { 2207 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); 2208#else 2209 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 2210 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 2211#endif 2212 2213 if (!psk->psk_ifname[0] || 
!strcmp(psk->psk_ifname, 2214 s->kif->pfik_name)) { 2215#if NPFSYNC > 0 2216 /* don't send out individual delete messages */ 2217 SET(s->state_flags, PFSTATE_NOSYNC); 2218#endif 2219 pf_unlink_state(s); 2220 killed++; 2221 } 2222 } 2223 psk->psk_killed = killed; 2224#if NPFSYNC > 0 2225#ifdef __FreeBSD__ 2226 if (pfsync_clear_states_ptr != NULL) 2227 pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname); 2228#else 2229 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 2230#endif 2231#endif 2232 break; 2233 } 2234 2235 case DIOCKILLSTATES: { 2236 struct pf_state *s, *nexts; 2237 struct pf_state_key *sk; 2238 struct pf_addr *srcaddr, *dstaddr; 2239 u_int16_t srcport, dstport; 2240 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 2241 u_int killed = 0; 2242 2243 if (psk->psk_pfcmp.id) { 2244 if (psk->psk_pfcmp.creatorid == 0) 2245#ifdef __FreeBSD__ 2246 psk->psk_pfcmp.creatorid = V_pf_status.hostid; 2247#else 2248 psk->psk_pfcmp.creatorid = pf_status.hostid; 2249#endif 2250 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 2251 pf_unlink_state(s); 2252 psk->psk_killed = 1; 2253 } 2254 break; 2255 } 2256 2257#ifdef __FreeBSD__ 2258 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; 2259 s = nexts) { 2260 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); 2261#else 2262 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 2263 s = nexts) { 2264 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 2265#endif 2266 sk = s->key[PF_SK_WIRE]; 2267 2268 if (s->direction == PF_OUT) { 2269 srcaddr = &sk->addr[1]; 2270 dstaddr = &sk->addr[0]; 2271 srcport = sk->port[0]; 2272 dstport = sk->port[0]; 2273 } else { 2274 srcaddr = &sk->addr[0]; 2275 dstaddr = &sk->addr[1]; 2276 srcport = sk->port[0]; 2277 dstport = sk->port[0]; 2278 } 2279 if ((!psk->psk_af || sk->af == psk->psk_af) 2280 && (!psk->psk_proto || psk->psk_proto == 2281 sk->proto) && 2282 PF_MATCHA(psk->psk_src.neg, 2283 &psk->psk_src.addr.v.a.addr, 2284 &psk->psk_src.addr.v.a.mask, 2285 srcaddr, sk->af) && 
2286 PF_MATCHA(psk->psk_dst.neg, 2287 &psk->psk_dst.addr.v.a.addr, 2288 &psk->psk_dst.addr.v.a.mask, 2289 dstaddr, sk->af) && 2290 (psk->psk_src.port_op == 0 || 2291 pf_match_port(psk->psk_src.port_op, 2292 psk->psk_src.port[0], psk->psk_src.port[1], 2293 srcport)) && 2294 (psk->psk_dst.port_op == 0 || 2295 pf_match_port(psk->psk_dst.port_op, 2296 psk->psk_dst.port[0], psk->psk_dst.port[1], 2297 dstport)) && 2298 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 2299 !strcmp(psk->psk_label, s->rule.ptr->label))) && 2300 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 2301 s->kif->pfik_name))) { 2302 pf_unlink_state(s); 2303 killed++; 2304 } 2305 } 2306 psk->psk_killed = killed; 2307 break; 2308 } 2309 2310 case DIOCADDSTATE: { 2311 struct pfioc_state *ps = (struct pfioc_state *)addr; 2312 struct pfsync_state *sp = &ps->state; 2313 2314 if (sp->timeout >= PFTM_MAX && 2315 sp->timeout != PFTM_UNTIL_PACKET) { 2316 error = EINVAL; 2317 break; 2318 } 2319#ifdef __FreeBSD__ 2320 if (pfsync_state_import_ptr != NULL) 2321 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 2322#else 2323 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 2324#endif 2325 break; 2326 } 2327 2328 case DIOCGETSTATE: { 2329 struct pfioc_state *ps = (struct pfioc_state *)addr; 2330 struct pf_state *s; 2331 struct pf_state_cmp id_key; 2332 2333 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id)); 2334 id_key.creatorid = ps->state.creatorid; 2335 2336 s = pf_find_state_byid(&id_key); 2337 if (s == NULL) { 2338 error = ENOENT; 2339 break; 2340 } 2341 2342 pfsync_state_export(&ps->state, s); 2343 break; 2344 } 2345 2346 case DIOCGETSTATES: { 2347 struct pfioc_states *ps = (struct pfioc_states *)addr; 2348 struct pf_state *state; 2349 struct pfsync_state *p, *pstore; 2350 u_int32_t nr = 0; 2351 2352 if (ps->ps_len == 0) { 2353#ifdef __FreeBSD__ 2354 nr = V_pf_status.states; 2355#else 2356 nr = pf_status.states; 2357#endif 2358 ps->ps_len = sizeof(struct pfsync_state) * nr; 2359 break; 2360 } 2361 
2362#ifdef __FreeBSD__ 2363 PF_UNLOCK(); 2364#endif 2365 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2366#ifdef __FreeBSD__ 2367 PF_LOCK(); 2368#endif 2369 2370 p = ps->ps_states; 2371 2372#ifdef __FreeBSD__ 2373 state = TAILQ_FIRST(&V_state_list); 2374#else 2375 state = TAILQ_FIRST(&state_list); 2376#endif 2377 while (state) { 2378 if (state->timeout != PFTM_UNLINKED) { 2379 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 2380 break; 2381 pfsync_state_export(pstore, state); 2382#ifdef __FreeBSD__ 2383 PF_COPYOUT(pstore, p, sizeof(*p), error); 2384#else 2385 error = copyout(pstore, p, sizeof(*p)); 2386#endif 2387 if (error) { 2388 free(pstore, M_TEMP); 2389 goto fail; 2390 } 2391 p++; 2392 nr++; 2393 } 2394 state = TAILQ_NEXT(state, entry_list); 2395 } 2396 2397 ps->ps_len = sizeof(struct pfsync_state) * nr; 2398 2399 free(pstore, M_TEMP); 2400 break; 2401 } 2402 2403 case DIOCGETSTATUS: { 2404 struct pf_status *s = (struct pf_status *)addr; 2405#ifdef __FreeBSD__ 2406 bcopy(&V_pf_status, s, sizeof(struct pf_status)); 2407#else 2408 bcopy(&pf_status, s, sizeof(struct pf_status)); 2409#endif 2410 pfi_update_status(s->ifname, s); 2411 break; 2412 } 2413 2414 case DIOCSETSTATUSIF: { 2415 struct pfioc_if *pi = (struct pfioc_if *)addr; 2416 2417 if (pi->ifname[0] == 0) { 2418#ifdef __FreeBSD__ 2419 bzero(V_pf_status.ifname, IFNAMSIZ); 2420#else 2421 bzero(pf_status.ifname, IFNAMSIZ); 2422#endif 2423 break; 2424 } 2425#ifdef __FreeBSD__ 2426 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 2427#else 2428 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 2429#endif 2430 break; 2431 } 2432 2433 case DIOCCLRSTATUS: { 2434#ifdef __FreeBSD__ 2435 bzero(V_pf_status.counters, sizeof(V_pf_status.counters)); 2436 bzero(V_pf_status.fcounters, sizeof(V_pf_status.fcounters)); 2437 bzero(V_pf_status.scounters, sizeof(V_pf_status.scounters)); 2438 V_pf_status.since = time_second; 2439 if (*V_pf_status.ifname) 2440 pfi_update_status(V_pf_status.ifname, NULL); 2441#else 2442 
bzero(pf_status.counters, sizeof(pf_status.counters)); 2443 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 2444 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 2445 pf_status.since = time_second; 2446 if (*pf_status.ifname) 2447 pfi_update_status(pf_status.ifname, NULL); 2448#endif 2449 break; 2450 } 2451 2452 case DIOCNATLOOK: { 2453 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2454 struct pf_state_key *sk; 2455 struct pf_state *state; 2456 struct pf_state_key_cmp key; 2457 int m = 0, direction = pnl->direction; 2458 int sidx, didx; 2459 2460 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 2461 sidx = (direction == PF_IN) ? 1 : 0; 2462 didx = (direction == PF_IN) ? 0 : 1; 2463 2464 if (!pnl->proto || 2465 PF_AZERO(&pnl->saddr, pnl->af) || 2466 PF_AZERO(&pnl->daddr, pnl->af) || 2467 ((pnl->proto == IPPROTO_TCP || 2468 pnl->proto == IPPROTO_UDP) && 2469 (!pnl->dport || !pnl->sport))) 2470 error = EINVAL; 2471 else { 2472 key.af = pnl->af; 2473 key.proto = pnl->proto; 2474 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 2475 key.port[sidx] = pnl->sport; 2476 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 2477 key.port[didx] = pnl->dport; 2478 2479 state = pf_find_state_all(&key, direction, &m); 2480 2481 if (m > 1) 2482 error = E2BIG; /* more than one state */ 2483 else if (state != NULL) { 2484 sk = state->key[sidx]; 2485 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 2486 pnl->rsport = sk->port[sidx]; 2487 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 2488 pnl->rdport = sk->port[didx]; 2489 } else 2490 error = ENOENT; 2491 } 2492 break; 2493 } 2494 2495 case DIOCSETTIMEOUT: { 2496 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2497 int old; 2498 2499 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2500 pt->seconds < 0) { 2501 error = EINVAL; 2502 goto fail; 2503 } 2504#ifdef __FreeBSD__ 2505 old = V_pf_default_rule.timeout[pt->timeout]; 2506#else 2507 old = pf_default_rule.timeout[pt->timeout]; 2508#endif 2509 if 
(pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2510 pt->seconds = 1; 2511#ifdef __FreeBSD__ 2512 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 2513#else 2514 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2515#endif 2516 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2517 wakeup(pf_purge_thread); 2518 pt->seconds = old; 2519 break; 2520 } 2521 2522 case DIOCGETTIMEOUT: { 2523 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2524 2525 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2526 error = EINVAL; 2527 goto fail; 2528 } 2529#ifdef __FreeBSD__ 2530 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 2531#else 2532 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2533#endif 2534 break; 2535 } 2536 2537 case DIOCGETLIMIT: { 2538 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2539 2540 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2541 error = EINVAL; 2542 goto fail; 2543 } 2544#ifdef __FreeBSD__ 2545 pl->limit = V_pf_pool_limits[pl->index].limit; 2546#else 2547 pl->limit = pf_pool_limits[pl->index].limit; 2548#endif 2549 break; 2550 } 2551 2552 case DIOCSETLIMIT: { 2553 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2554 int old_limit; 2555 2556 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2557#ifdef __FreeBSD__ 2558 V_pf_pool_limits[pl->index].pp == NULL) { 2559#else 2560 pf_pool_limits[pl->index].pp == NULL) { 2561#endif 2562 error = EINVAL; 2563 goto fail; 2564 } 2565#ifdef __FreeBSD__ 2566 uma_zone_set_max(V_pf_pool_limits[pl->index].pp, pl->limit); 2567 old_limit = V_pf_pool_limits[pl->index].limit; 2568 V_pf_pool_limits[pl->index].limit = pl->limit; 2569 pl->limit = old_limit; 2570#else 2571 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2572 pl->limit, NULL, 0) != 0) { 2573 error = EBUSY; 2574 goto fail; 2575 } 2576 old_limit = pf_pool_limits[pl->index].limit; 2577 pf_pool_limits[pl->index].limit = pl->limit; 2578 pl->limit = old_limit; 2579#endif 2580 break; 2581 } 2582 2583 case DIOCSETDEBUG: { 2584 
u_int32_t *level = (u_int32_t *)addr; 2585 2586#ifdef __FreeBSD__ 2587 V_pf_status.debug = *level; 2588#else 2589 pf_status.debug = *level; 2590#endif 2591 break; 2592 } 2593 2594 case DIOCCLRRULECTRS: { 2595 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2596 struct pf_ruleset *ruleset = &pf_main_ruleset; 2597 struct pf_rule *rule; 2598 2599 TAILQ_FOREACH(rule, 2600 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2601 rule->evaluations = 0; 2602 rule->packets[0] = rule->packets[1] = 0; 2603 rule->bytes[0] = rule->bytes[1] = 0; 2604 } 2605 break; 2606 } 2607 2608#ifdef __FreeBSD__ 2609 case DIOCGIFSPEED: { 2610 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2611 struct pf_ifspeed ps; 2612 struct ifnet *ifp; 2613 2614 if (psp->ifname[0] != 0) { 2615 /* Can we completely trust user-land? */ 2616 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2617 ifp = ifunit(ps.ifname); 2618 if (ifp != NULL) 2619 psp->baudrate = ifp->if_baudrate; 2620 else 2621 error = EINVAL; 2622 } else 2623 error = EINVAL; 2624 break; 2625 } 2626#endif /* __FreeBSD__ */ 2627 2628#ifdef ALTQ 2629 case DIOCSTARTALTQ: { 2630 struct pf_altq *altq; 2631 2632 /* enable all altq interfaces on active list */ 2633#ifdef __FreeBSD__ 2634 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 2635 if (altq->qname[0] == 0 && (altq->local_flags & 2636 PFALTQ_FLAG_IF_REMOVED) == 0) { 2637#else 2638 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2639 if (altq->qname[0] == 0) { 2640#endif 2641 error = pf_enable_altq(altq); 2642 if (error != 0) 2643 break; 2644 } 2645 } 2646 if (error == 0) 2647#ifdef __FreeBSD__ 2648 V_pf_altq_running = 1; 2649#else 2650 pf_altq_running = 1; 2651#endif 2652 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2653 break; 2654 } 2655 2656 case DIOCSTOPALTQ: { 2657 struct pf_altq *altq; 2658 2659 /* disable all altq interfaces on active list */ 2660#ifdef __FreeBSD__ 2661 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 2662 if (altq->qname[0] == 0 && 
(altq->local_flags & 2663 PFALTQ_FLAG_IF_REMOVED) == 0) { 2664#else 2665 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2666 if (altq->qname[0] == 0) { 2667#endif 2668 error = pf_disable_altq(altq); 2669 if (error != 0) 2670 break; 2671 } 2672 } 2673 if (error == 0) 2674#ifdef __FreeBSD__ 2675 V_pf_altq_running = 0; 2676#else 2677 pf_altq_running = 0; 2678#endif 2679 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2680 break; 2681 } 2682 2683 case DIOCADDALTQ: { 2684 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2685 struct pf_altq *altq, *a; 2686 2687#ifdef __FreeBSD__ 2688 if (pa->ticket != V_ticket_altqs_inactive) { 2689#else 2690 if (pa->ticket != ticket_altqs_inactive) { 2691#endif 2692 error = EBUSY; 2693 break; 2694 } 2695#ifdef __FreeBSD__ 2696 altq = pool_get(&V_pf_altq_pl, PR_NOWAIT); 2697#else 2698 altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL); 2699#endif 2700 if (altq == NULL) { 2701 error = ENOMEM; 2702 break; 2703 } 2704 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2705#ifdef __FreeBSD__ 2706 altq->local_flags = 0; 2707#endif 2708 2709 /* 2710 * if this is for a queue, find the discipline and 2711 * copy the necessary fields 2712 */ 2713 if (altq->qname[0] != 0) { 2714 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2715 error = EBUSY; 2716#ifdef __FreeBSD__ 2717 pool_put(&V_pf_altq_pl, altq); 2718#else 2719 pool_put(&pf_altq_pl, altq); 2720#endif 2721 break; 2722 } 2723 altq->altq_disc = NULL; 2724#ifdef __FreeBSD__ 2725 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) { 2726#else 2727 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2728#endif 2729 if (strncmp(a->ifname, altq->ifname, 2730 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2731 altq->altq_disc = a->altq_disc; 2732 break; 2733 } 2734 } 2735 } 2736 2737#ifdef __FreeBSD__ 2738 struct ifnet *ifp; 2739 2740 if ((ifp = ifunit(altq->ifname)) == NULL) { 2741 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 2742 } else { 2743 PF_UNLOCK(); 2744#endif 2745 error = altq_add(altq); 2746#ifdef 
__FreeBSD__ 2747 PF_LOCK(); 2748 } 2749#endif 2750 if (error) { 2751#ifdef __FreeBSD__ 2752 pool_put(&V_pf_altq_pl, altq); 2753#else 2754 pool_put(&pf_altq_pl, altq); 2755#endif 2756 break; 2757 } 2758 2759#ifdef __FreeBSD__ 2760 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 2761#else 2762 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2763#endif 2764 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2765 break; 2766 } 2767 2768 case DIOCGETALTQS: { 2769 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2770 struct pf_altq *altq; 2771 2772 pa->nr = 0; 2773#ifdef __FreeBSD__ 2774 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 2775 pa->nr++; 2776 pa->ticket = V_ticket_altqs_active; 2777#else 2778 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2779 pa->nr++; 2780 pa->ticket = ticket_altqs_active; 2781#endif 2782 break; 2783 } 2784 2785 case DIOCGETALTQ: { 2786 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2787 struct pf_altq *altq; 2788 u_int32_t nr; 2789 2790#ifdef __FreeBSD__ 2791 if (pa->ticket != V_ticket_altqs_active) { 2792#else 2793 if (pa->ticket != ticket_altqs_active) { 2794#endif 2795 error = EBUSY; 2796 break; 2797 } 2798 nr = 0; 2799#ifdef __FreeBSD__ 2800 altq = TAILQ_FIRST(V_pf_altqs_active); 2801#else 2802 altq = TAILQ_FIRST(pf_altqs_active); 2803#endif 2804 while ((altq != NULL) && (nr < pa->nr)) { 2805 altq = TAILQ_NEXT(altq, entries); 2806 nr++; 2807 } 2808 if (altq == NULL) { 2809 error = EBUSY; 2810 break; 2811 } 2812 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2813 break; 2814 } 2815 2816 case DIOCCHANGEALTQ: 2817 /* CHANGEALTQ not supported yet! 
*/ 2818 error = ENODEV; 2819 break; 2820 2821 case DIOCGETQSTATS: { 2822 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2823 struct pf_altq *altq; 2824 u_int32_t nr; 2825 int nbytes; 2826 2827#ifdef __FreeBSD__ 2828 if (pq->ticket != V_ticket_altqs_active) { 2829#else 2830 if (pq->ticket != ticket_altqs_active) { 2831#endif 2832 error = EBUSY; 2833 break; 2834 } 2835 nbytes = pq->nbytes; 2836 nr = 0; 2837#ifdef __FreeBSD__ 2838 altq = TAILQ_FIRST(V_pf_altqs_active); 2839#else 2840 altq = TAILQ_FIRST(pf_altqs_active); 2841#endif 2842 while ((altq != NULL) && (nr < pq->nr)) { 2843 altq = TAILQ_NEXT(altq, entries); 2844 nr++; 2845 } 2846 if (altq == NULL) { 2847 error = EBUSY; 2848 break; 2849 } 2850 2851#ifdef __FreeBSD__ 2852 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2853 error = ENXIO; 2854 break; 2855 } 2856 PF_UNLOCK(); 2857#endif 2858 error = altq_getqstats(altq, pq->buf, &nbytes); 2859#ifdef __FreeBSD__ 2860 PF_LOCK(); 2861#endif 2862 if (error == 0) { 2863 pq->scheduler = altq->scheduler; 2864 pq->nbytes = nbytes; 2865 } 2866 break; 2867 } 2868#endif /* ALTQ */ 2869 2870 case DIOCBEGINADDRS: { 2871 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2872 2873#ifdef __FreeBSD__ 2874 pf_empty_pool(&V_pf_pabuf); 2875 pp->ticket = ++V_ticket_pabuf; 2876#else 2877 pf_empty_pool(&pf_pabuf); 2878 pp->ticket = ++ticket_pabuf; 2879#endif 2880 break; 2881 } 2882 2883 case DIOCADDADDR: { 2884 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2885 2886#ifdef __FreeBSD__ 2887 if (pp->ticket != V_ticket_pabuf) { 2888#else 2889 if (pp->ticket != ticket_pabuf) { 2890#endif 2891 error = EBUSY; 2892 break; 2893 } 2894#ifndef INET 2895 if (pp->af == AF_INET) { 2896 error = EAFNOSUPPORT; 2897 break; 2898 } 2899#endif /* INET */ 2900#ifndef INET6 2901 if (pp->af == AF_INET6) { 2902 error = EAFNOSUPPORT; 2903 break; 2904 } 2905#endif /* INET6 */ 2906 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2907 pp->addr.addr.type != PF_ADDR_DYNIFTL && 
2908 pp->addr.addr.type != PF_ADDR_TABLE) { 2909 error = EINVAL; 2910 break; 2911 } 2912#ifdef __FreeBSD__ 2913 pa = pool_get(&V_pf_pooladdr_pl, PR_NOWAIT); 2914#else 2915 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK|PR_LIMITFAIL); 2916#endif 2917 if (pa == NULL) { 2918 error = ENOMEM; 2919 break; 2920 } 2921 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2922 if (pa->ifname[0]) { 2923 pa->kif = pfi_kif_get(pa->ifname); 2924 if (pa->kif == NULL) { 2925#ifdef __FreeBSD__ 2926 pool_put(&V_pf_pooladdr_pl, pa); 2927#else 2928 pool_put(&pf_pooladdr_pl, pa); 2929#endif 2930 error = EINVAL; 2931 break; 2932 } 2933 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2934 } 2935 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2936 pfi_dynaddr_remove(&pa->addr); 2937 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2938#ifdef __FreeBSD__ 2939 pool_put(&V_pf_pooladdr_pl, pa); 2940#else 2941 pool_put(&pf_pooladdr_pl, pa); 2942#endif 2943 error = EINVAL; 2944 break; 2945 } 2946#ifdef __FreeBSD__ 2947 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 2948#else 2949 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2950#endif 2951 break; 2952 } 2953 2954 case DIOCGETADDRS: { 2955 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2956 2957 pp->nr = 0; 2958 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2959 pp->r_num, 0, 1, 0); 2960 if (pool == NULL) { 2961 error = EBUSY; 2962 break; 2963 } 2964 TAILQ_FOREACH(pa, &pool->list, entries) 2965 pp->nr++; 2966 break; 2967 } 2968 2969 case DIOCGETADDR: { 2970 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2971 u_int32_t nr = 0; 2972 2973 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2974 pp->r_num, 0, 1, 1); 2975 if (pool == NULL) { 2976 error = EBUSY; 2977 break; 2978 } 2979 pa = TAILQ_FIRST(&pool->list); 2980 while ((pa != NULL) && (nr < pp->nr)) { 2981 pa = TAILQ_NEXT(pa, entries); 2982 nr++; 2983 } 2984 if (pa == NULL) { 2985 error = EBUSY; 2986 break; 2987 } 2988 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2989 
pf_addr_copyout(&pp->addr.addr); 2990 break; 2991 } 2992 2993 case DIOCCHANGEADDR: { 2994 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2995 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2996 struct pf_ruleset *ruleset; 2997 2998 if (pca->action < PF_CHANGE_ADD_HEAD || 2999 pca->action > PF_CHANGE_REMOVE) { 3000 error = EINVAL; 3001 break; 3002 } 3003 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 3004 pca->addr.addr.type != PF_ADDR_DYNIFTL && 3005 pca->addr.addr.type != PF_ADDR_TABLE) { 3006 error = EINVAL; 3007 break; 3008 } 3009 3010 ruleset = pf_find_ruleset(pca->anchor); 3011 if (ruleset == NULL) { 3012 error = EBUSY; 3013 break; 3014 } 3015 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 3016 pca->r_num, pca->r_last, 1, 1); 3017 if (pool == NULL) { 3018 error = EBUSY; 3019 break; 3020 } 3021 if (pca->action != PF_CHANGE_REMOVE) { 3022#ifdef __FreeBSD__ 3023 newpa = pool_get(&V_pf_pooladdr_pl, 3024 PR_NOWAIT); 3025#else 3026 newpa = pool_get(&pf_pooladdr_pl, 3027 PR_WAITOK|PR_LIMITFAIL); 3028#endif 3029 if (newpa == NULL) { 3030 error = ENOMEM; 3031 break; 3032 } 3033 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 3034#ifndef INET 3035 if (pca->af == AF_INET) { 3036#ifdef __FreeBSD__ 3037 pool_put(&V_pf_pooladdr_pl, newpa); 3038#else 3039 pool_put(&pf_pooladdr_pl, newpa); 3040#endif 3041 error = EAFNOSUPPORT; 3042 break; 3043 } 3044#endif /* INET */ 3045#ifndef INET6 3046 if (pca->af == AF_INET6) { 3047#ifdef __FreeBSD__ 3048 pool_put(&V_pf_pooladdr_pl, newpa); 3049#else 3050 pool_put(&pf_pooladdr_pl, newpa); 3051#endif 3052 error = EAFNOSUPPORT; 3053 break; 3054 } 3055#endif /* INET6 */ 3056 if (newpa->ifname[0]) { 3057 newpa->kif = pfi_kif_get(newpa->ifname); 3058 if (newpa->kif == NULL) { 3059#ifdef __FreeBSD__ 3060 pool_put(&V_pf_pooladdr_pl, newpa); 3061#else 3062 pool_put(&pf_pooladdr_pl, newpa); 3063#endif 3064 error = EINVAL; 3065 break; 3066 } 3067 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 3068 } else 3069 
newpa->kif = NULL; 3070 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 3071 pf_tbladdr_setup(ruleset, &newpa->addr)) { 3072 pfi_dynaddr_remove(&newpa->addr); 3073 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 3074#ifdef __FreeBSD__ 3075 pool_put(&V_pf_pooladdr_pl, newpa); 3076#else 3077 pool_put(&pf_pooladdr_pl, newpa); 3078#endif 3079 error = EINVAL; 3080 break; 3081 } 3082 } 3083 3084 if (pca->action == PF_CHANGE_ADD_HEAD) 3085 oldpa = TAILQ_FIRST(&pool->list); 3086 else if (pca->action == PF_CHANGE_ADD_TAIL) 3087 oldpa = TAILQ_LAST(&pool->list, pf_palist); 3088 else { 3089 int i = 0; 3090 3091 oldpa = TAILQ_FIRST(&pool->list); 3092 while ((oldpa != NULL) && (i < pca->nr)) { 3093 oldpa = TAILQ_NEXT(oldpa, entries); 3094 i++; 3095 } 3096 if (oldpa == NULL) { 3097 error = EINVAL; 3098 break; 3099 } 3100 } 3101 3102 if (pca->action == PF_CHANGE_REMOVE) { 3103 TAILQ_REMOVE(&pool->list, oldpa, entries); 3104 pfi_dynaddr_remove(&oldpa->addr); 3105 pf_tbladdr_remove(&oldpa->addr); 3106 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 3107#ifdef __FreeBSD__ 3108 pool_put(&V_pf_pooladdr_pl, oldpa); 3109#else 3110 pool_put(&pf_pooladdr_pl, oldpa); 3111#endif 3112 } else { 3113 if (oldpa == NULL) 3114 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 3115 else if (pca->action == PF_CHANGE_ADD_HEAD || 3116 pca->action == PF_CHANGE_ADD_BEFORE) 3117 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 3118 else 3119 TAILQ_INSERT_AFTER(&pool->list, oldpa, 3120 newpa, entries); 3121 } 3122 3123 pool->cur = TAILQ_FIRST(&pool->list); 3124 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 3125 pca->af); 3126 break; 3127 } 3128 3129 case DIOCGETRULESETS: { 3130 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 3131 struct pf_ruleset *ruleset; 3132 struct pf_anchor *anchor; 3133 3134 pr->path[sizeof(pr->path) - 1] = 0; 3135 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 3136 error = EINVAL; 3137 break; 3138 } 3139 pr->nr = 0; 3140 if (ruleset->anchor == NULL) { 3141 /* XXX kludge for 
pf_main_ruleset */ 3142#ifdef __FreeBSD__ 3143 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 3144#else 3145 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 3146#endif 3147 if (anchor->parent == NULL) 3148 pr->nr++; 3149 } else { 3150 RB_FOREACH(anchor, pf_anchor_node, 3151 &ruleset->anchor->children) 3152 pr->nr++; 3153 } 3154 break; 3155 } 3156 3157 case DIOCGETRULESET: { 3158 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 3159 struct pf_ruleset *ruleset; 3160 struct pf_anchor *anchor; 3161 u_int32_t nr = 0; 3162 3163 pr->path[sizeof(pr->path) - 1] = 0; 3164 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 3165 error = EINVAL; 3166 break; 3167 } 3168 pr->name[0] = 0; 3169 if (ruleset->anchor == NULL) { 3170 /* XXX kludge for pf_main_ruleset */ 3171#ifdef __FreeBSD__ 3172 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 3173#else 3174 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 3175#endif 3176 if (anchor->parent == NULL && nr++ == pr->nr) { 3177 strlcpy(pr->name, anchor->name, 3178 sizeof(pr->name)); 3179 break; 3180 } 3181 } else { 3182 RB_FOREACH(anchor, pf_anchor_node, 3183 &ruleset->anchor->children) 3184 if (nr++ == pr->nr) { 3185 strlcpy(pr->name, anchor->name, 3186 sizeof(pr->name)); 3187 break; 3188 } 3189 } 3190 if (!pr->name[0]) 3191 error = EBUSY; 3192 break; 3193 } 3194 3195 case DIOCRCLRTABLES: { 3196 struct pfioc_table *io = (struct pfioc_table *)addr; 3197 3198 if (io->pfrio_esize != 0) { 3199 error = ENODEV; 3200 break; 3201 } 3202 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 3203 io->pfrio_flags | PFR_FLAG_USERIOCTL); 3204 break; 3205 } 3206 3207 case DIOCRADDTABLES: { 3208 struct pfioc_table *io = (struct pfioc_table *)addr; 3209 3210 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3211 error = ENODEV; 3212 break; 3213 } 3214 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 3215 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3216 break; 3217 } 3218 3219 case DIOCRDELTABLES: { 3220 
struct pfioc_table *io = (struct pfioc_table *)addr; 3221 3222 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3223 error = ENODEV; 3224 break; 3225 } 3226 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 3227 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3228 break; 3229 } 3230 3231 case DIOCRGETTABLES: { 3232 struct pfioc_table *io = (struct pfioc_table *)addr; 3233 3234 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3235 error = ENODEV; 3236 break; 3237 } 3238 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 3239 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3240 break; 3241 } 3242 3243 case DIOCRGETTSTATS: { 3244 struct pfioc_table *io = (struct pfioc_table *)addr; 3245 3246 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 3247 error = ENODEV; 3248 break; 3249 } 3250 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 3251 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3252 break; 3253 } 3254 3255 case DIOCRCLRTSTATS: { 3256 struct pfioc_table *io = (struct pfioc_table *)addr; 3257 3258 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3259 error = ENODEV; 3260 break; 3261 } 3262 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 3263 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3264 break; 3265 } 3266 3267 case DIOCRSETTFLAGS: { 3268 struct pfioc_table *io = (struct pfioc_table *)addr; 3269 3270 if (io->pfrio_esize != sizeof(struct pfr_table)) { 3271 error = ENODEV; 3272 break; 3273 } 3274 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 3275 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 3276 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3277 break; 3278 } 3279 3280 case DIOCRCLRADDRS: { 3281 struct pfioc_table *io = (struct pfioc_table *)addr; 3282 3283 if (io->pfrio_esize != 0) { 3284 error = ENODEV; 3285 break; 3286 } 3287 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 3288 io->pfrio_flags | PFR_FLAG_USERIOCTL); 3289 break; 3290 } 3291 
3292 case DIOCRADDADDRS: { 3293 struct pfioc_table *io = (struct pfioc_table *)addr; 3294 3295 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3296 error = ENODEV; 3297 break; 3298 } 3299 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 3300 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 3301 PFR_FLAG_USERIOCTL); 3302 break; 3303 } 3304 3305 case DIOCRDELADDRS: { 3306 struct pfioc_table *io = (struct pfioc_table *)addr; 3307 3308 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3309 error = ENODEV; 3310 break; 3311 } 3312 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 3313 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 3314 PFR_FLAG_USERIOCTL); 3315 break; 3316 } 3317 3318 case DIOCRSETADDRS: { 3319 struct pfioc_table *io = (struct pfioc_table *)addr; 3320 3321 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3322 error = ENODEV; 3323 break; 3324 } 3325 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 3326 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 3327 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 3328 PFR_FLAG_USERIOCTL, 0); 3329 break; 3330 } 3331 3332 case DIOCRGETADDRS: { 3333 struct pfioc_table *io = (struct pfioc_table *)addr; 3334 3335 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3336 error = ENODEV; 3337 break; 3338 } 3339 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 3340 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3341 break; 3342 } 3343 3344 case DIOCRGETASTATS: { 3345 struct pfioc_table *io = (struct pfioc_table *)addr; 3346 3347 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 3348 error = ENODEV; 3349 break; 3350 } 3351 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 3352 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3353 break; 3354 } 3355 3356 case DIOCRCLRASTATS: { 3357 struct pfioc_table *io = (struct pfioc_table *)addr; 3358 3359 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3360 error = ENODEV; 3361 break; 3362 } 3363 error = 
pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 3364 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 3365 PFR_FLAG_USERIOCTL); 3366 break; 3367 } 3368 3369 case DIOCRTSTADDRS: { 3370 struct pfioc_table *io = (struct pfioc_table *)addr; 3371 3372 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3373 error = ENODEV; 3374 break; 3375 } 3376 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 3377 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 3378 PFR_FLAG_USERIOCTL); 3379 break; 3380 } 3381 3382 case DIOCRINADEFINE: { 3383 struct pfioc_table *io = (struct pfioc_table *)addr; 3384 3385 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 3386 error = ENODEV; 3387 break; 3388 } 3389 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 3390 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 3391 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 3392 break; 3393 } 3394 3395 case DIOCOSFPADD: { 3396 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3397 error = pf_osfp_add(io); 3398 break; 3399 } 3400 3401 case DIOCOSFPGET: { 3402 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 3403 error = pf_osfp_get(io); 3404 break; 3405 } 3406 3407 case DIOCXBEGIN: { 3408 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3409 struct pfioc_trans_e *ioe; 3410 struct pfr_table *table; 3411 int i; 3412 3413 if (io->esize != sizeof(*ioe)) { 3414 error = ENODEV; 3415 goto fail; 3416 } 3417#ifdef __FreeBSD__ 3418 PF_UNLOCK(); 3419#endif 3420 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3421 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3422#ifdef __FreeBSD__ 3423 PF_LOCK(); 3424#endif 3425 for (i = 0; i < io->size; i++) { 3426#ifdef __FreeBSD__ 3427 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3428 if (error) { 3429#else 3430 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3431#endif 3432 free(table, M_TEMP); 3433 free(ioe, M_TEMP); 3434 error = EFAULT; 3435 goto fail; 3436 } 3437 switch (ioe->rs_num) { 3438#ifdef ALTQ 3439 case 
PF_RULESET_ALTQ: 3440 if (ioe->anchor[0]) { 3441 free(table, M_TEMP); 3442 free(ioe, M_TEMP); 3443 error = EINVAL; 3444 goto fail; 3445 } 3446 if ((error = pf_begin_altq(&ioe->ticket))) { 3447 free(table, M_TEMP); 3448 free(ioe, M_TEMP); 3449 goto fail; 3450 } 3451 break; 3452#endif /* ALTQ */ 3453 case PF_RULESET_TABLE: 3454 bzero(table, sizeof(*table)); 3455 strlcpy(table->pfrt_anchor, ioe->anchor, 3456 sizeof(table->pfrt_anchor)); 3457 if ((error = pfr_ina_begin(table, 3458 &ioe->ticket, NULL, 0))) { 3459 free(table, M_TEMP); 3460 free(ioe, M_TEMP); 3461 goto fail; 3462 } 3463 break; 3464 default: 3465 if ((error = pf_begin_rules(&ioe->ticket, 3466 ioe->rs_num, ioe->anchor))) { 3467 free(table, M_TEMP); 3468 free(ioe, M_TEMP); 3469 goto fail; 3470 } 3471 break; 3472 } 3473#ifdef __FreeBSD__ 3474 PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]), 3475 error); 3476 if (error) { 3477#else 3478 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 3479#endif 3480 free(table, M_TEMP); 3481 free(ioe, M_TEMP); 3482 error = EFAULT; 3483 goto fail; 3484 } 3485 } 3486 free(table, M_TEMP); 3487 free(ioe, M_TEMP); 3488 break; 3489 } 3490 3491 case DIOCXROLLBACK: { 3492 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3493 struct pfioc_trans_e *ioe; 3494 struct pfr_table *table; 3495 int i; 3496 3497 if (io->esize != sizeof(*ioe)) { 3498 error = ENODEV; 3499 goto fail; 3500 } 3501#ifdef __FreeBSD__ 3502 PF_UNLOCK(); 3503#endif 3504 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3505 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3506#ifdef __FreeBSD__ 3507 PF_LOCK(); 3508#endif 3509 for (i = 0; i < io->size; i++) { 3510#ifdef __FreeBSD__ 3511 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3512 if (error) { 3513#else 3514 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3515#endif 3516 free(table, M_TEMP); 3517 free(ioe, M_TEMP); 3518 error = EFAULT; 3519 goto fail; 3520 } 3521 switch (ioe->rs_num) { 3522#ifdef ALTQ 3523 case PF_RULESET_ALTQ: 3524 if (ioe->anchor[0]) { 
3525 free(table, M_TEMP); 3526 free(ioe, M_TEMP); 3527 error = EINVAL; 3528 goto fail; 3529 } 3530 if ((error = pf_rollback_altq(ioe->ticket))) { 3531 free(table, M_TEMP); 3532 free(ioe, M_TEMP); 3533 goto fail; /* really bad */ 3534 } 3535 break; 3536#endif /* ALTQ */ 3537 case PF_RULESET_TABLE: 3538 bzero(table, sizeof(*table)); 3539 strlcpy(table->pfrt_anchor, ioe->anchor, 3540 sizeof(table->pfrt_anchor)); 3541 if ((error = pfr_ina_rollback(table, 3542 ioe->ticket, NULL, 0))) { 3543 free(table, M_TEMP); 3544 free(ioe, M_TEMP); 3545 goto fail; /* really bad */ 3546 } 3547 break; 3548 default: 3549 if ((error = pf_rollback_rules(ioe->ticket, 3550 ioe->rs_num, ioe->anchor))) { 3551 free(table, M_TEMP); 3552 free(ioe, M_TEMP); 3553 goto fail; /* really bad */ 3554 } 3555 break; 3556 } 3557 } 3558 free(table, M_TEMP); 3559 free(ioe, M_TEMP); 3560 break; 3561 } 3562 3563 case DIOCXCOMMIT: { 3564 struct pfioc_trans *io = (struct pfioc_trans *)addr; 3565 struct pfioc_trans_e *ioe; 3566 struct pfr_table *table; 3567 struct pf_ruleset *rs; 3568 int i; 3569 3570 if (io->esize != sizeof(*ioe)) { 3571 error = ENODEV; 3572 goto fail; 3573 } 3574#ifdef __FreeBSD__ 3575 PF_UNLOCK(); 3576#endif 3577 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 3578 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 3579#ifdef __FreeBSD__ 3580 PF_LOCK(); 3581#endif 3582 /* first makes sure everything will succeed */ 3583 for (i = 0; i < io->size; i++) { 3584#ifdef __FreeBSD__ 3585 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3586 if (error) { 3587#else 3588 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3589#endif 3590 free(table, M_TEMP); 3591 free(ioe, M_TEMP); 3592 error = EFAULT; 3593 goto fail; 3594 } 3595 switch (ioe->rs_num) { 3596#ifdef ALTQ 3597 case PF_RULESET_ALTQ: 3598 if (ioe->anchor[0]) { 3599 free(table, M_TEMP); 3600 free(ioe, M_TEMP); 3601 error = EINVAL; 3602 goto fail; 3603 } 3604#ifdef __FreeBSD__ 3605 if (!V_altqs_inactive_open || ioe->ticket != 3606 
V_ticket_altqs_inactive) { 3607#else 3608 if (!altqs_inactive_open || ioe->ticket != 3609 ticket_altqs_inactive) { 3610#endif 3611 free(table, M_TEMP); 3612 free(ioe, M_TEMP); 3613 error = EBUSY; 3614 goto fail; 3615 } 3616 break; 3617#endif /* ALTQ */ 3618 case PF_RULESET_TABLE: 3619 rs = pf_find_ruleset(ioe->anchor); 3620 if (rs == NULL || !rs->topen || ioe->ticket != 3621 rs->tticket) { 3622 free(table, M_TEMP); 3623 free(ioe, M_TEMP); 3624 error = EBUSY; 3625 goto fail; 3626 } 3627 break; 3628 default: 3629 if (ioe->rs_num < 0 || ioe->rs_num >= 3630 PF_RULESET_MAX) { 3631 free(table, M_TEMP); 3632 free(ioe, M_TEMP); 3633 error = EINVAL; 3634 goto fail; 3635 } 3636 rs = pf_find_ruleset(ioe->anchor); 3637 if (rs == NULL || 3638 !rs->rules[ioe->rs_num].inactive.open || 3639 rs->rules[ioe->rs_num].inactive.ticket != 3640 ioe->ticket) { 3641 free(table, M_TEMP); 3642 free(ioe, M_TEMP); 3643 error = EBUSY; 3644 goto fail; 3645 } 3646 break; 3647 } 3648 } 3649 /* now do the commit - no errors should happen here */ 3650 for (i = 0; i < io->size; i++) { 3651#ifdef __FreeBSD__ 3652 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error); 3653 if (error) { 3654#else 3655 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3656#endif 3657 free(table, M_TEMP); 3658 free(ioe, M_TEMP); 3659 error = EFAULT; 3660 goto fail; 3661 } 3662 switch (ioe->rs_num) { 3663#ifdef ALTQ 3664 case PF_RULESET_ALTQ: 3665 if ((error = pf_commit_altq(ioe->ticket))) { 3666 free(table, M_TEMP); 3667 free(ioe, M_TEMP); 3668 goto fail; /* really bad */ 3669 } 3670 break; 3671#endif /* ALTQ */ 3672 case PF_RULESET_TABLE: 3673 bzero(table, sizeof(*table)); 3674 strlcpy(table->pfrt_anchor, ioe->anchor, 3675 sizeof(table->pfrt_anchor)); 3676 if ((error = pfr_ina_commit(table, ioe->ticket, 3677 NULL, NULL, 0))) { 3678 free(table, M_TEMP); 3679 free(ioe, M_TEMP); 3680 goto fail; /* really bad */ 3681 } 3682 break; 3683 default: 3684 if ((error = pf_commit_rules(ioe->ticket, 3685 ioe->rs_num, ioe->anchor))) { 3686 
free(table, M_TEMP); 3687 free(ioe, M_TEMP); 3688 goto fail; /* really bad */ 3689 } 3690 break; 3691 } 3692 } 3693 free(table, M_TEMP); 3694 free(ioe, M_TEMP); 3695 break; 3696 } 3697 3698 case DIOCGETSRCNODES: { 3699 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3700 struct pf_src_node *n, *p, *pstore; 3701 u_int32_t nr = 0; 3702 int space = psn->psn_len; 3703 3704 if (space == 0) { 3705#ifdef __FreeBSD__ 3706 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) 3707#else 3708 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3709#endif 3710 nr++; 3711 psn->psn_len = sizeof(struct pf_src_node) * nr; 3712 break; 3713 } 3714 3715#ifdef __FreeBSD__ 3716 PF_UNLOCK(); 3717#endif 3718 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3719#ifdef __FreeBSD__ 3720 PF_LOCK(); 3721#endif 3722 p = psn->psn_src_nodes; 3723#ifdef __FreeBSD__ 3724 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { 3725#else 3726 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3727#endif 3728 int secs = time_second, diff; 3729 3730 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3731 break; 3732 3733 bcopy(n, pstore, sizeof(*pstore)); 3734 if (n->rule.ptr != NULL) 3735 pstore->rule.nr = n->rule.ptr->nr; 3736 pstore->creation = secs - pstore->creation; 3737 if (pstore->expire > secs) 3738 pstore->expire -= secs; 3739 else 3740 pstore->expire = 0; 3741 3742 /* adjust the connection rate estimate */ 3743 diff = secs - n->conn_rate.last; 3744 if (diff >= n->conn_rate.seconds) 3745 pstore->conn_rate.count = 0; 3746 else 3747 pstore->conn_rate.count -= 3748 n->conn_rate.count * diff / 3749 n->conn_rate.seconds; 3750 3751#ifdef __FreeBSD__ 3752 PF_COPYOUT(pstore, p, sizeof(*p), error); 3753#else 3754 error = copyout(pstore, p, sizeof(*p)); 3755#endif 3756 if (error) { 3757 free(pstore, M_TEMP); 3758 goto fail; 3759 } 3760 p++; 3761 nr++; 3762 } 3763 psn->psn_len = sizeof(struct pf_src_node) * nr; 3764 3765 free(pstore, M_TEMP); 3766 break; 3767 } 3768 3769 case DIOCCLRSRCNODES: { 3770 
		/* Clear all source-tracking nodes. */
		struct pf_src_node *n;
		struct pf_state *state;

		/* First detach every state from its source node(s). */
#ifdef __FreeBSD__
		RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
#else
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
#endif
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
#ifdef __FreeBSD__
		RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) {
#else
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
#endif
			/* Mark expired so the purge below reaps them. */
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(1);
#ifdef __FreeBSD__
		V_pf_status.src_nodes = 0;
#else
		pf_status.src_nodes = 0;
#endif
		break;
	}

	case DIOCKILLSRCNODES: {
		/* Expire the source nodes matching the src/dst filter. */
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int killed = 0;

#ifdef __FreeBSD__
		RB_FOREACH(sn, pf_src_tree, &V_tree_src_tracking) {
#else
		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
#endif
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
#ifdef __FreeBSD__
					    &V_tree_id) {
#else
					    &tree_id) {
#endif
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				/* Expire it; the purge below frees it. */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes(1);

		psnk->psnk_killed = killed;
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		/* A zero hostid means "pick a random one". */
#ifdef __FreeBSD__
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
#else
		if (*hostid == 0)
			pf_status.hostid = arc4random();
		else
			pf_status.hostid = *hostid;
#endif
		break;
	}

	case DIOCOSFPFLUSH:
		/* Flush the passive OS fingerprint table. */
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Reject callers built against a different pfi_kif size. */
		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	/* Common exit: release the locks taken on entry. */
#ifdef __FreeBSD__
	PF_UNLOCK();

	if (flags & FWRITE)
		sx_xunlock(&V_pf_consistency_lock);
	else
		sx_sunlock(&V_pf_consistency_lock);
#else
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
#endif

	CURVNET_RESTORE();

	return (error);
}

#ifdef __FreeBSD__
/*
 * Export a kernel pf_state into the wire-format pfsync_state
 * representation (multi-byte fields in network byte order).
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] =
	    st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	/* Creation and expiry are exported as relative seconds. */
	sp->creation = htonl(time_second - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* Rule references go out as rule numbers, -1 meaning "none". */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}

/*
 * XXX - Check for version mismatch!!!
 */
/* Unlink (and thereby schedule for purge) every entry in the state table. */
static void
pf_clear_states(void)
{
	struct pf_state	*state;

#ifdef __FreeBSD__
	RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
#else
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
#endif
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't send out individual delete messages */
		state->sync_state = PFSTATE_NOSYNC;
#endif
		pf_unlink_state(state);
	}

#if 0	/* NPFSYNC */
	/*
	 * XXX This is called on module unload; we do not want to sync
	 * that over?
	 */
	pfsync_clear_states(V_pf_status.hostid, psk->psk_ifname);
#endif
}

/* Flush all tables; io is zeroed, so no anchor/flags filter applies. */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/* Detach states from their source nodes and expire every source node. */
static void
pf_clear_srcnodes(void)
{
	struct pf_src_node	*n;
	struct pf_state		*state;

#ifdef __FreeBSD__
	RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
#else
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
#endif
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
#ifdef __FreeBSD__
	RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) {
#else
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
#endif
		n->expire = 1;
		n->states = 0;
	}
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	V_pf_status.running = 0;
	do {
		/*
		 * Open an empty inactive set for every ruleset type;
		 * committing them below drops the old active rules.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* Same begin/commit trick to drop the ALTQ config. */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

#ifdef INET
/*
 * pfil(9) input hook for IPv4: run the packet through pf_test() and
 * free the mbuf when pf rejects it.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul  9 22:03:16 2003 UTC
	 * OpenBSD
 has changed its byte ordering convention on ip_len/ip_off
	 * in network stack. OpenBSD's network stack had converted
	 * ip_len/ip_off to host byte order first, as FreeBSD does.
	 * Now this is not true anymore, so we should convert back to
	 * network byte order.
	 */
	struct ip *h = NULL;
	int chk;

	if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
		/* if m_pkthdr.len is less than ip header, pf will handle. */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test(PF_IN, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv4: finalize any delayed checksum, then
 * run the packet through pf_test().
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * XXX Wed Jul  9 22:03:16 2003 UTC
	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
	 * in network stack. OpenBSD's network stack had converted
	 * ip_len/ip_off to host byte order first, as FreeBSD does.
	 * Now this is not true anymore, so we should convert back to
	 * network byte order.
	 */
	struct ip *h = NULL;
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
		/* if m_pkthdr.len is less than ip header, pf will handle.
 */
		h = mtod(*m, struct ip *);
		HTONS(h->ip_len);
		HTONS(h->ip_off);
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test(PF_OUT, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	if (*m != NULL) {
		/* pf_test can change ip header location */
		h = mtod(*m, struct ip *);
		NTOHS(h->ip_len);
		NTOHS(h->ip_off);
	}
	return chk;
}
#endif

#ifdef INET6
/* pfil(9) input hook for IPv6. */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we have changed this to lo0 as it is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
	    NULL, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/* pfil(9) output hook for IPv6. */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
#ifdef INET
		/* XXX-BZ copy&paste error from r126261?
 */
		in_delayed_cksum(*m);
#endif
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf pfil(9) hooks for the address families compiled in.
 * Returns 0 if already hooked or on success, ESRCH when a pfil head
 * is missing.
 */
static int
hook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (V_pf_pfil_hooked)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* Roll back the IPv4 hooks before failing. */
#ifdef INET
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	V_pf_pfil_hooked = 1;
	return (0);
}

/* Unregister the pf pfil(9) hooks; inverse of hook_pf(). */
static int
dehook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	PF_ASSERT(MA_NOTOWNED);

	if (V_pf_pfil_hooked == 0)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 =
 pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	V_pf_pfil_hooked = 0;
	return (0);
}

/* Vnet accessors */
/* Per-vnet constructor: reset per-vnet globals, then attach pf. */
static int
vnet_pf_init(const void *unused)
{

	V_pf_pfil_hooked = 0;
	V_pf_end_threads = 0;

	V_debug_pfugidhack = 0;

	TAILQ_INIT(&V_pf_tags);
	TAILQ_INIT(&V_pf_qids);

	pf_load();

	return (0);
}

/* Per-vnet destructor. */
static int
vnet_pf_uninit(const void *unused)
{

	pf_unload();

	return (0);
}

/* Define startup order. */
#define	PF_SYSINIT_ORDER	SI_SUB_PROTO_BEGIN
#define	PF_MODEVENT_ORDER	(SI_ORDER_FIRST) /* On boot slot in here. */
#define	PF_VNET_ORDER		(PF_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pf_init, PF_SYSINIT_ORDER, PF_VNET_ORDER,
    vnet_pf_init, NULL);

/*
 * Closing up shop. These are done in REVERSE ORDER.
 * Not called on reboot.
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pf_uninit, PF_SYSINIT_ORDER, PF_VNET_ORDER,
    vnet_pf_uninit, NULL);

/* Allocate zones, set up locks and attach pf; ENOMEM on failure. */
static int
pf_load(void)
{

	init_zone_var();
	sx_init(&V_pf_consistency_lock, "pf_statetbl_lock");
	init_pf_mutex();
	if (pfattach() < 0) {
		destroy_pf_mutex();
		return (ENOMEM);
	}

	return (0);
}

/*
 * Tear pf down: unhook pfil, flush rules/states/tables, stop the
 * purge thread, and release zones and locks.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_LOCK();
	V_pf_status.running = 0;
	PF_UNLOCK();
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		/* NOTE(review): typo "unregisteration" is in the runtime
		 * log string; left unchanged here to preserve behavior. */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_LOCK();
	shutdown_pf();
	/* Ask the purge thread to exit and wait until it acknowledges. */
	V_pf_end_threads = 1;
	while (V_pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		msleep(pf_purge_thread, &V_pf_task_mtx, 0, "pftmo", hz);
	}
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	PF_UNLOCK();
	destroy_pf_mutex();
	sx_destroy(&V_pf_consistency_lock);
	return error;
}

/* Module event handler: create/destroy the /dev/pf control device. */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
		break;
	case MOD_UNLOAD:
		destroy_dev(pf_dev);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
#endif /* __FreeBSD__ */