pf_ioctl.c revision 263026
1/*- 2 * Copyright (c) 2001 Daniel Hartmeier 3 * Copyright (c) 2002,2003 Henning Brauer 4 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * Effort sponsored in part by the Defense Advanced Research Projects 32 * Agency (DARPA) and Air Force Research Laboratory, Air Force 33 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
34 * 35 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ 36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD: stable/10/sys/netpfil/pf/pf_ioctl.c 263026 2014-03-11 15:19:11Z glebius $"); 40 41#include "opt_inet.h" 42#include "opt_inet6.h" 43#include "opt_bpf.h" 44#include "opt_pf.h" 45 46#include <sys/param.h> 47#include <sys/bus.h> 48#include <sys/conf.h> 49#include <sys/endian.h> 50#include <sys/fcntl.h> 51#include <sys/filio.h> 52#include <sys/interrupt.h> 53#include <sys/jail.h> 54#include <sys/kernel.h> 55#include <sys/kthread.h> 56#include <sys/mbuf.h> 57#include <sys/module.h> 58#include <sys/proc.h> 59#include <sys/smp.h> 60#include <sys/socket.h> 61#include <sys/sysctl.h> 62#include <sys/md5.h> 63#include <sys/ucred.h> 64 65#include <net/if.h> 66#include <net/route.h> 67#include <net/pfil.h> 68#include <net/pfvar.h> 69#include <net/if_pfsync.h> 70#include <net/if_pflog.h> 71 72#include <netinet/in.h> 73#include <netinet/ip.h> 74#include <netinet/ip_var.h> 75#include <netinet/ip_icmp.h> 76 77#ifdef INET6 78#include <netinet/ip6.h> 79#endif /* INET6 */ 80 81#ifdef ALTQ 82#include <altq/altq.h> 83#endif 84 85static int pfattach(void); 86static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 87 u_int8_t, u_int8_t, u_int8_t); 88 89static void pf_mv_pool(struct pf_palist *, struct pf_palist *); 90static void pf_empty_pool(struct pf_palist *); 91static int pfioctl(struct cdev *, u_long, caddr_t, int, 92 struct thread *); 93#ifdef ALTQ 94static int pf_begin_altq(u_int32_t *); 95static int pf_rollback_altq(u_int32_t); 96static int pf_commit_altq(u_int32_t); 97static int pf_enable_altq(struct pf_altq *); 98static int pf_disable_altq(struct pf_altq *); 99static u_int32_t pf_qname2qid(char *); 100static void pf_qid_unref(u_int32_t); 101#endif /* ALTQ */ 102static int pf_begin_rules(u_int32_t *, int, const char *); 103static int pf_rollback_rules(u_int32_t, int, char *); 104static int pf_setup_pfsync_matching(struct pf_ruleset *); 
105static void pf_hash_rule(MD5_CTX *, struct pf_rule *); 106static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 107static int pf_commit_rules(u_int32_t, int, char *); 108static int pf_addr_setup(struct pf_ruleset *, 109 struct pf_addr_wrap *, sa_family_t); 110static void pf_addr_copyout(struct pf_addr_wrap *); 111 112VNET_DEFINE(struct pf_rule, pf_default_rule); 113 114#ifdef ALTQ 115static VNET_DEFINE(int, pf_altq_running); 116#define V_pf_altq_running VNET(pf_altq_running) 117#endif 118 119#define TAGID_MAX 50000 120struct pf_tagname { 121 TAILQ_ENTRY(pf_tagname) entries; 122 char name[PF_TAG_NAME_SIZE]; 123 uint16_t tag; 124 int ref; 125}; 126 127TAILQ_HEAD(pf_tags, pf_tagname); 128#define V_pf_tags VNET(pf_tags) 129VNET_DEFINE(struct pf_tags, pf_tags); 130#define V_pf_qids VNET(pf_qids) 131VNET_DEFINE(struct pf_tags, pf_qids); 132static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names"); 133static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db"); 134static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules"); 135 136#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 137#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 138#endif 139 140static u_int16_t tagname2tag(struct pf_tags *, char *); 141static u_int16_t pf_tagname2tag(char *); 142static void tag_unref(struct pf_tags *, u_int16_t); 143 144#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 145 146struct cdev *pf_dev; 147 148/* 149 * XXX - These are new and need to be checked when moveing to a new version 150 */ 151static void pf_clear_states(void); 152static int pf_clear_tables(void); 153static void pf_clear_srcnodes(struct pf_src_node *); 154static void pf_kill_srcnodes(struct pfioc_src_node_kill *); 155static void pf_tbladdr_copyout(struct pf_addr_wrap *); 156 157/* 158 * Wrapper functions for pfil(9) hooks 159 */ 160#ifdef INET 161static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 162 int dir, struct inpcb *inp); 163static int pf_check_out(void *arg, 
    struct mbuf **m, struct ifnet *ifp,
		    int dir, struct inpcb *inp);
#endif
#ifdef INET6
static int		 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
		    int dir, struct inpcb *inp);
static int		 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
		    int dir, struct inpcb *inp);
#endif

static int		hook_pf(void);
static int		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static int		pf_unload(void);

/* Character-device switch: pf is configured exclusively via ioctl(2). */
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

/* Nonzero while the pfil(9) hooks are installed (set by hook_pf()). */
static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE(int,		pf_end_threads);

struct rwlock			pf_rules_lock;

/*
 * Optional-module entry points: filled in by pfsync(4)/pflog(4) when those
 * modules load; NULL otherwise.
 */
/* pfsync */
pfsync_state_import_t		*pfsync_state_import_ptr = NULL;
pfsync_insert_state_t		*pfsync_insert_state_ptr = NULL;
pfsync_update_state_t		*pfsync_update_state_ptr = NULL;
pfsync_delete_state_t		*pfsync_delete_state_ptr = NULL;
pfsync_clear_states_t		*pfsync_clear_states_ptr = NULL;
pfsync_defer_t			*pfsync_defer_ptr = NULL;
/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

/*
 * One-time pf(4) initialization: set up subsystems, limits, the default
 * rule and its timeouts, and start the purge kthread and send-SWI.
 * Returns 0 on success or the kproc_create()/swi_add() error (in which
 * case earlier allocations are leaked — see XXXGL notes below).
 */
static int
pfattach(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;
	int error;

	pf_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_normalize_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
	    "pf purge")) != 0)
		/* XXXGL: leaked all above. */
		return (error);
	if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie)) != 0)
		/* XXXGL: leaked all above. */
		return (error);

	return (0);
}

/*
 * Locate the address pool of one rule in the given anchor's ruleset.
 * The rule is selected from the active or inactive queue (per 'active'),
 * optionally validated against 'ticket', and is either the last rule
 * (r_last != 0) or the rule whose ->nr equals rule_number.
 * Returns a pointer to the rule's rpool, or NULL if anything fails to match.
 */
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
	u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
	u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* Linear scan for the rule with the requested number. */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/* Move every pool address from poola to the tail of poolb (preserves order). */
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/*
 * Free every entry of an address pool, dropping the dynaddr/table/kif
 * references each entry holds before freeing the entry itself.
 */
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

/*
 * Unlink a rule from its queue and park it on V_pf_unlinked_rules with
 * PFRULE_REFS set (presumably reclaimed later once no references remain —
 * the reclaim path is not visible in this file chunk).
 * Caller must hold the rules write lock.
 */
static void
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_flag |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

/*
 * Release every reference a rule holds (tags, queue IDs, dynamic addresses,
 * tables, kif, anchor, pool) and free the rule itself.
 * Caller must hold the rules write lock.
 */
void
pf_free_rule(struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kif_unref(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	free(rule, M_PFRULE);
}

/*
 * Map a tag name to a numeric id on the given (tag-sorted) list, taking a
 * reference.  An existing name reuses its id; otherwise the lowest free id
 * is allocated.  Returns 0 on failure (id space exhausted or M_NOWAIT
 * allocation failed).  Caller must hold the rules write lock.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	PF_RULES_WASSERT();

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/*
 * Drop one reference on a tag id; the entry is freed when the refcount
 * reaches zero.  Caller must hold the rules write lock.
 */
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	PF_RULES_WASSERT();

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_PFTAG);
			}
			break;
		}
	}
}

/* Convenience wrapper: allocate/lookup a packet tag id in V_pf_tags. */
static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
/* Allocate/lookup an ALTQ queue id in V_pf_qids (qids share the tag code). */
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

/* Release one reference on an ALTQ queue id. */
static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}

/*
 * Start an ALTQ configuration transaction: discard any leftover inactive
 * altq list and hand back a new ticket.  Caller must hold the rules
 * write lock.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

/*
 * Abort an ALTQ transaction: if the ticket matches the open transaction,
 * purge the inactive list and close it.  A stale ticket is a no-op.
 * Caller must hold the rules write lock.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	V_altqs_inactive_open = 0;
	return (error);
}

/*
 * Commit an ALTQ transaction: swap the inactive list in as active, attach
 * the new disciplines, then detach/free the previous active set.
 * Returns EBUSY on a stale ticket.  Caller must hold the rules write lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}

	V_altqs_inactive_open = 0;
	return (error);
}

/*
 * Enable queueing on the altq's interface and install its token-bucket
 * regulator.  Returns EINVAL if the interface no longer exists.
 */
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

/*
 * Disable queueing on the altq's interface and clear the token-bucket
 * regulator.  A discipline that has already been superseded is a no-op.
 */
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

/*
 * React to an interface arrival/departure: rebuild the active altq set
 * in a fresh transaction, marking entries on the affected (or missing)
 * interface with PFALTQ_FLAG_IF_REMOVED instead of attaching them.
 * Any in-progress userland transaction is rolled back first.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
				free(a2, M_PFALTQ);
				break;
			}
			/* Inherit the discipline of the matching root altq. */
			a2->altq_disc = NULL;
			TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			error = altq_add(a2);

			if (ticket != V_ticket_altqs_inactive)
				error = EBUSY;

			if (error) {
				free(a2, M_PFALTQ);
				break;
			}
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

/*
 * Open a rules transaction for one ruleset of an anchor: empty the
 * inactive queue and return a fresh ticket via *ticket.
 * Caller must hold the rules write lock.
 */
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort a rules transaction: empty the inactive queue and close it.
 * A missing ruleset or stale ticket is a silent no-op.
 * Caller must hold the rules write lock.
 */
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Helpers for folding rule fields into an MD5 context ('ctx' must be in
 * scope at the expansion site).  The HTONL/HTONS variants hash the value
 * in network byte order so the checksum matches across hosts.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/* Fold the match-relevant fields of one rule address into the MD5 hash. */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/* Fold the match-relevant fields of one rule into the MD5 hash. */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for PF_MD5_UPD_HTONS */
	u_int32_t y;	/* scratch for PF_MD5_UPD_HTONL */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit a rules transaction: recompute the pfsync checksum for the main
 * ruleset, swap the inactive queue in as active, recalculate skip steps,
 * and unlink the previous active rules.  Returns EBUSY on a stale ticket.
 * Caller must hold the rules write lock.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	return (0);
}

/*
 * Hash all inactive rulesets (except scrub) into V_pf_status.pf_chksum and
 * build per-ruleset ptr_array lookup tables indexed by rule number.
 * Returns ENOMEM if an array allocation fails.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

/*
 * Resolve a rule address wrapper coming from userland: attach the named
 * table or set up the dynamic-interface address.  Returns 0 or an errno.
 */
static int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

/* Prepare an address wrapper for copyout to userland (reverse of setup). */
static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

/*
 * Main pf(4) ioctl dispatcher.  The two leading switches implement access
 * control: the first restricts commands at securelevel > 2 to read-only /
 * dummy operations, the second restricts read-only file descriptors.
 * (This function continues beyond the end of this file chunk.)
 */
static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case
DIOCGETLIMIT: 984 case DIOCGETALTQS: 985 case DIOCGETALTQ: 986 case DIOCGETQSTATS: 987 case DIOCGETRULESETS: 988 case DIOCGETRULESET: 989 case DIOCRGETTABLES: 990 case DIOCRGETTSTATS: 991 case DIOCRCLRTSTATS: 992 case DIOCRCLRADDRS: 993 case DIOCRADDADDRS: 994 case DIOCRDELADDRS: 995 case DIOCRSETADDRS: 996 case DIOCRGETADDRS: 997 case DIOCRGETASTATS: 998 case DIOCRCLRASTATS: 999 case DIOCRTSTADDRS: 1000 case DIOCOSFPGET: 1001 case DIOCGETSRCNODES: 1002 case DIOCCLRSRCNODES: 1003 case DIOCIGETIFACES: 1004 case DIOCGIFSPEED: 1005 case DIOCSETIFFLAG: 1006 case DIOCCLRIFFLAG: 1007 break; 1008 case DIOCRCLRTABLES: 1009 case DIOCRADDTABLES: 1010 case DIOCRDELTABLES: 1011 case DIOCRSETTFLAGS: 1012 if (((struct pfioc_table *)addr)->pfrio_flags & 1013 PFR_FLAG_DUMMY) 1014 break; /* dummy operation ok */ 1015 return (EPERM); 1016 default: 1017 return (EPERM); 1018 } 1019 1020 if (!(flags & FWRITE)) 1021 switch (cmd) { 1022 case DIOCGETRULES: 1023 case DIOCGETADDRS: 1024 case DIOCGETADDR: 1025 case DIOCGETSTATE: 1026 case DIOCGETSTATUS: 1027 case DIOCGETSTATES: 1028 case DIOCGETTIMEOUT: 1029 case DIOCGETLIMIT: 1030 case DIOCGETALTQS: 1031 case DIOCGETALTQ: 1032 case DIOCGETQSTATS: 1033 case DIOCGETRULESETS: 1034 case DIOCGETRULESET: 1035 case DIOCNATLOOK: 1036 case DIOCRGETTABLES: 1037 case DIOCRGETTSTATS: 1038 case DIOCRGETADDRS: 1039 case DIOCRGETASTATS: 1040 case DIOCRTSTADDRS: 1041 case DIOCOSFPGET: 1042 case DIOCGETSRCNODES: 1043 case DIOCIGETIFACES: 1044 case DIOCGIFSPEED: 1045 break; 1046 case DIOCRCLRTABLES: 1047 case DIOCRADDTABLES: 1048 case DIOCRDELTABLES: 1049 case DIOCRCLRTSTATS: 1050 case DIOCRCLRADDRS: 1051 case DIOCRADDADDRS: 1052 case DIOCRDELADDRS: 1053 case DIOCRSETADDRS: 1054 case DIOCRSETTFLAGS: 1055 if (((struct pfioc_table *)addr)->pfrio_flags & 1056 PFR_FLAG_DUMMY) { 1057 flags |= FWRITE; /* need write lock for dummy */ 1058 break; /* dummy operation ok */ 1059 } 1060 return (EACCES); 1061 case DIOCGETRULE: 1062 if (((struct pfioc_rule *)addr)->action 
== 1063 PF_GET_CLR_CNTR) 1064 return (EACCES); 1065 break; 1066 default: 1067 return (EACCES); 1068 } 1069 1070 CURVNET_SET(TD_TO_VNET(td)); 1071 1072 switch (cmd) { 1073 case DIOCSTART: 1074 PF_RULES_WLOCK(); 1075 if (V_pf_status.running) 1076 error = EEXIST; 1077 else { 1078 int cpu; 1079 1080 PF_RULES_WUNLOCK(); 1081 error = hook_pf(); 1082 if (error) { 1083 DPFPRINTF(PF_DEBUG_MISC, 1084 ("pf: pfil registration failed\n")); 1085 break; 1086 } 1087 PF_RULES_WLOCK(); 1088 V_pf_status.running = 1; 1089 V_pf_status.since = time_second; 1090 1091 CPU_FOREACH(cpu) 1092 V_pf_stateid[cpu] = time_second; 1093 1094 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1095 } 1096 PF_RULES_WUNLOCK(); 1097 break; 1098 1099 case DIOCSTOP: 1100 PF_RULES_WLOCK(); 1101 if (!V_pf_status.running) 1102 error = ENOENT; 1103 else { 1104 V_pf_status.running = 0; 1105 PF_RULES_WUNLOCK(); 1106 error = dehook_pf(); 1107 if (error) { 1108 V_pf_status.running = 1; 1109 DPFPRINTF(PF_DEBUG_MISC, 1110 ("pf: pfil unregistration failed\n")); 1111 } 1112 PF_RULES_WLOCK(); 1113 V_pf_status.since = time_second; 1114 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1115 } 1116 PF_RULES_WUNLOCK(); 1117 break; 1118 1119 case DIOCADDRULE: { 1120 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1121 struct pf_ruleset *ruleset; 1122 struct pf_rule *rule, *tail; 1123 struct pf_pooladdr *pa; 1124 struct pfi_kif *kif = NULL; 1125 int rs_num; 1126 1127 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1128 error = EINVAL; 1129 break; 1130 } 1131#ifndef INET 1132 if (pr->rule.af == AF_INET) { 1133 error = EAFNOSUPPORT; 1134 break; 1135 } 1136#endif /* INET */ 1137#ifndef INET6 1138 if (pr->rule.af == AF_INET6) { 1139 error = EAFNOSUPPORT; 1140 break; 1141 } 1142#endif /* INET6 */ 1143 1144 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 1145 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1146 if (rule->ifname[0]) 1147 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 1148 rule->cuid = td->td_ucred->cr_ruid; 1149 
rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1150 TAILQ_INIT(&rule->rpool.list); 1151 1152#define ERROUT(x) { error = (x); goto DIOCADDRULE_error; } 1153 1154 PF_RULES_WLOCK(); 1155 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1156 ruleset = pf_find_ruleset(pr->anchor); 1157 if (ruleset == NULL) 1158 ERROUT(EINVAL); 1159 rs_num = pf_get_ruleset_number(pr->rule.action); 1160 if (rs_num >= PF_RULESET_MAX) 1161 ERROUT(EINVAL); 1162 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1163 DPFPRINTF(PF_DEBUG_MISC, 1164 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num, 1165 ruleset->rules[rs_num].inactive.ticket)); 1166 ERROUT(EBUSY); 1167 } 1168 if (pr->pool_ticket != V_ticket_pabuf) { 1169 DPFPRINTF(PF_DEBUG_MISC, 1170 ("pool_ticket: %d != %d\n", pr->pool_ticket, 1171 V_ticket_pabuf)); 1172 ERROUT(EBUSY); 1173 } 1174 1175 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1176 pf_rulequeue); 1177 if (tail) 1178 rule->nr = tail->nr + 1; 1179 else 1180 rule->nr = 0; 1181 if (rule->ifname[0]) { 1182 rule->kif = pfi_kif_attach(kif, rule->ifname); 1183 pfi_kif_ref(rule->kif); 1184 } else 1185 rule->kif = NULL; 1186 1187 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 1188 error = EBUSY; 1189 1190#ifdef ALTQ 1191 /* set queue IDs */ 1192 if (rule->qname[0] != 0) { 1193 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1194 error = EBUSY; 1195 else if (rule->pqname[0] != 0) { 1196 if ((rule->pqid = 1197 pf_qname2qid(rule->pqname)) == 0) 1198 error = EBUSY; 1199 } else 1200 rule->pqid = rule->qid; 1201 } 1202#endif 1203 if (rule->tagname[0]) 1204 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1205 error = EBUSY; 1206 if (rule->match_tagname[0]) 1207 if ((rule->match_tag = 1208 pf_tagname2tag(rule->match_tagname)) == 0) 1209 error = EBUSY; 1210 if (rule->rt && !rule->direction) 1211 error = EINVAL; 1212 if (!rule->log) 1213 rule->logif = 0; 1214 if (rule->logif >= PFLOGIFS_MAX) 1215 error = EINVAL; 1216 if (pf_addr_setup(ruleset, &rule->src.addr, 
rule->af)) 1217 error = ENOMEM; 1218 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1219 error = ENOMEM; 1220 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1221 error = EINVAL; 1222 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 1223 if (pa->addr.type == PF_ADDR_TABLE) { 1224 pa->addr.p.tbl = pfr_attach_table(ruleset, 1225 pa->addr.v.tblname); 1226 if (pa->addr.p.tbl == NULL) 1227 error = ENOMEM; 1228 } 1229 1230 if (rule->overload_tblname[0]) { 1231 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1232 rule->overload_tblname)) == NULL) 1233 error = EINVAL; 1234 else 1235 rule->overload_tbl->pfrkt_flags |= 1236 PFR_TFLAG_ACTIVE; 1237 } 1238 1239 pf_mv_pool(&V_pf_pabuf, &rule->rpool.list); 1240 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1241 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1242 (rule->rt > PF_FASTROUTE)) && 1243 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1244 error = EINVAL; 1245 1246 if (error) { 1247 pf_free_rule(rule); 1248 PF_RULES_WUNLOCK(); 1249 break; 1250 } 1251 1252 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1253 rule->evaluations = rule->packets[0] = rule->packets[1] = 1254 rule->bytes[0] = rule->bytes[1] = 0; 1255 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1256 rule, entries); 1257 ruleset->rules[rs_num].inactive.rcount++; 1258 PF_RULES_WUNLOCK(); 1259 break; 1260 1261#undef ERROUT 1262DIOCADDRULE_error: 1263 PF_RULES_WUNLOCK(); 1264 free(rule, M_PFRULE); 1265 if (kif) 1266 free(kif, PFI_MTYPE); 1267 break; 1268 } 1269 1270 case DIOCGETRULES: { 1271 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1272 struct pf_ruleset *ruleset; 1273 struct pf_rule *tail; 1274 int rs_num; 1275 1276 PF_RULES_WLOCK(); 1277 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1278 ruleset = pf_find_ruleset(pr->anchor); 1279 if (ruleset == NULL) { 1280 PF_RULES_WUNLOCK(); 1281 error = EINVAL; 1282 break; 1283 } 1284 rs_num = pf_get_ruleset_number(pr->rule.action); 1285 if (rs_num >= PF_RULESET_MAX) { 1286 
PF_RULES_WUNLOCK(); 1287 error = EINVAL; 1288 break; 1289 } 1290 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1291 pf_rulequeue); 1292 if (tail) 1293 pr->nr = tail->nr + 1; 1294 else 1295 pr->nr = 0; 1296 pr->ticket = ruleset->rules[rs_num].active.ticket; 1297 PF_RULES_WUNLOCK(); 1298 break; 1299 } 1300 1301 case DIOCGETRULE: { 1302 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1303 struct pf_ruleset *ruleset; 1304 struct pf_rule *rule; 1305 int rs_num, i; 1306 1307 PF_RULES_WLOCK(); 1308 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1309 ruleset = pf_find_ruleset(pr->anchor); 1310 if (ruleset == NULL) { 1311 PF_RULES_WUNLOCK(); 1312 error = EINVAL; 1313 break; 1314 } 1315 rs_num = pf_get_ruleset_number(pr->rule.action); 1316 if (rs_num >= PF_RULESET_MAX) { 1317 PF_RULES_WUNLOCK(); 1318 error = EINVAL; 1319 break; 1320 } 1321 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1322 PF_RULES_WUNLOCK(); 1323 error = EBUSY; 1324 break; 1325 } 1326 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1327 while ((rule != NULL) && (rule->nr != pr->nr)) 1328 rule = TAILQ_NEXT(rule, entries); 1329 if (rule == NULL) { 1330 PF_RULES_WUNLOCK(); 1331 error = EBUSY; 1332 break; 1333 } 1334 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1335 if (pf_anchor_copyout(ruleset, rule, pr)) { 1336 PF_RULES_WUNLOCK(); 1337 error = EBUSY; 1338 break; 1339 } 1340 pf_addr_copyout(&pr->rule.src.addr); 1341 pf_addr_copyout(&pr->rule.dst.addr); 1342 for (i = 0; i < PF_SKIP_COUNT; ++i) 1343 if (rule->skip[i].ptr == NULL) 1344 pr->rule.skip[i].nr = -1; 1345 else 1346 pr->rule.skip[i].nr = 1347 rule->skip[i].ptr->nr; 1348 1349 if (pr->action == PF_GET_CLR_CNTR) { 1350 rule->evaluations = 0; 1351 rule->packets[0] = rule->packets[1] = 0; 1352 rule->bytes[0] = rule->bytes[1] = 0; 1353 rule->states_tot = 0; 1354 } 1355 PF_RULES_WUNLOCK(); 1356 break; 1357 } 1358 1359 case DIOCCHANGERULE: { 1360 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1361 struct pf_ruleset *ruleset; 1362 
struct pf_rule *oldrule = NULL, *newrule = NULL; 1363 struct pfi_kif *kif = NULL; 1364 struct pf_pooladdr *pa; 1365 u_int32_t nr = 0; 1366 int rs_num; 1367 1368 if (pcr->action < PF_CHANGE_ADD_HEAD || 1369 pcr->action > PF_CHANGE_GET_TICKET) { 1370 error = EINVAL; 1371 break; 1372 } 1373 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1374 error = EINVAL; 1375 break; 1376 } 1377 1378 if (pcr->action != PF_CHANGE_REMOVE) { 1379#ifndef INET 1380 if (pcr->rule.af == AF_INET) { 1381 error = EAFNOSUPPORT; 1382 break; 1383 } 1384#endif /* INET */ 1385#ifndef INET6 1386 if (pcr->rule.af == AF_INET6) { 1387 error = EAFNOSUPPORT; 1388 break; 1389 } 1390#endif /* INET6 */ 1391 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK); 1392 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1393 newrule->cuid = td->td_ucred->cr_ruid; 1394 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 1395 TAILQ_INIT(&newrule->rpool.list); 1396 /* Initialize refcounting. */ 1397 newrule->states_cur = 0; 1398 newrule->entries.tqe_prev = NULL; 1399 1400 if (newrule->ifname[0]) 1401 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 1402 } 1403 1404#define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; } 1405 1406 PF_RULES_WLOCK(); 1407 if (!(pcr->action == PF_CHANGE_REMOVE || 1408 pcr->action == PF_CHANGE_GET_TICKET) && 1409 pcr->pool_ticket != V_ticket_pabuf) 1410 ERROUT(EBUSY); 1411 1412 ruleset = pf_find_ruleset(pcr->anchor); 1413 if (ruleset == NULL) 1414 ERROUT(EINVAL); 1415 1416 rs_num = pf_get_ruleset_number(pcr->rule.action); 1417 if (rs_num >= PF_RULESET_MAX) 1418 ERROUT(EINVAL); 1419 1420 if (pcr->action == PF_CHANGE_GET_TICKET) { 1421 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1422 ERROUT(0); 1423 } else if (pcr->ticket != 1424 ruleset->rules[rs_num].active.ticket) 1425 ERROUT(EINVAL); 1426 1427 if (pcr->action != PF_CHANGE_REMOVE) { 1428 if (newrule->ifname[0]) { 1429 newrule->kif = pfi_kif_attach(kif, 1430 newrule->ifname); 1431 pfi_kif_ref(newrule->kif); 1432 } 
else 1433 newrule->kif = NULL; 1434 1435 if (newrule->rtableid > 0 && 1436 newrule->rtableid >= rt_numfibs) 1437 error = EBUSY; 1438 1439#ifdef ALTQ 1440 /* set queue IDs */ 1441 if (newrule->qname[0] != 0) { 1442 if ((newrule->qid = 1443 pf_qname2qid(newrule->qname)) == 0) 1444 error = EBUSY; 1445 else if (newrule->pqname[0] != 0) { 1446 if ((newrule->pqid = 1447 pf_qname2qid(newrule->pqname)) == 0) 1448 error = EBUSY; 1449 } else 1450 newrule->pqid = newrule->qid; 1451 } 1452#endif /* ALTQ */ 1453 if (newrule->tagname[0]) 1454 if ((newrule->tag = 1455 pf_tagname2tag(newrule->tagname)) == 0) 1456 error = EBUSY; 1457 if (newrule->match_tagname[0]) 1458 if ((newrule->match_tag = pf_tagname2tag( 1459 newrule->match_tagname)) == 0) 1460 error = EBUSY; 1461 if (newrule->rt && !newrule->direction) 1462 error = EINVAL; 1463 if (!newrule->log) 1464 newrule->logif = 0; 1465 if (newrule->logif >= PFLOGIFS_MAX) 1466 error = EINVAL; 1467 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 1468 error = ENOMEM; 1469 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 1470 error = ENOMEM; 1471 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1472 error = EINVAL; 1473 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 1474 if (pa->addr.type == PF_ADDR_TABLE) { 1475 pa->addr.p.tbl = 1476 pfr_attach_table(ruleset, 1477 pa->addr.v.tblname); 1478 if (pa->addr.p.tbl == NULL) 1479 error = ENOMEM; 1480 } 1481 1482 if (newrule->overload_tblname[0]) { 1483 if ((newrule->overload_tbl = pfr_attach_table( 1484 ruleset, newrule->overload_tblname)) == 1485 NULL) 1486 error = EINVAL; 1487 else 1488 newrule->overload_tbl->pfrkt_flags |= 1489 PFR_TFLAG_ACTIVE; 1490 } 1491 1492 pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list); 1493 if (((((newrule->action == PF_NAT) || 1494 (newrule->action == PF_RDR) || 1495 (newrule->action == PF_BINAT) || 1496 (newrule->rt > PF_FASTROUTE)) && 1497 !newrule->anchor)) && 1498 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1499 error = EINVAL; 1500 1501 
if (error) { 1502 pf_free_rule(newrule); 1503 PF_RULES_WUNLOCK(); 1504 break; 1505 } 1506 1507 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1508 newrule->evaluations = 0; 1509 newrule->packets[0] = newrule->packets[1] = 0; 1510 newrule->bytes[0] = newrule->bytes[1] = 0; 1511 } 1512 pf_empty_pool(&V_pf_pabuf); 1513 1514 if (pcr->action == PF_CHANGE_ADD_HEAD) 1515 oldrule = TAILQ_FIRST( 1516 ruleset->rules[rs_num].active.ptr); 1517 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1518 oldrule = TAILQ_LAST( 1519 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1520 else { 1521 oldrule = TAILQ_FIRST( 1522 ruleset->rules[rs_num].active.ptr); 1523 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1524 oldrule = TAILQ_NEXT(oldrule, entries); 1525 if (oldrule == NULL) { 1526 if (newrule != NULL) 1527 pf_free_rule(newrule); 1528 PF_RULES_WUNLOCK(); 1529 error = EINVAL; 1530 break; 1531 } 1532 } 1533 1534 if (pcr->action == PF_CHANGE_REMOVE) { 1535 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 1536 oldrule); 1537 ruleset->rules[rs_num].active.rcount--; 1538 } else { 1539 if (oldrule == NULL) 1540 TAILQ_INSERT_TAIL( 1541 ruleset->rules[rs_num].active.ptr, 1542 newrule, entries); 1543 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1544 pcr->action == PF_CHANGE_ADD_BEFORE) 1545 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1546 else 1547 TAILQ_INSERT_AFTER( 1548 ruleset->rules[rs_num].active.ptr, 1549 oldrule, newrule, entries); 1550 ruleset->rules[rs_num].active.rcount++; 1551 } 1552 1553 nr = 0; 1554 TAILQ_FOREACH(oldrule, 1555 ruleset->rules[rs_num].active.ptr, entries) 1556 oldrule->nr = nr++; 1557 1558 ruleset->rules[rs_num].active.ticket++; 1559 1560 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1561 pf_remove_if_empty_ruleset(ruleset); 1562 1563 PF_RULES_WUNLOCK(); 1564 break; 1565 1566#undef ERROUT 1567DIOCCHANGERULE_error: 1568 PF_RULES_WUNLOCK(); 1569 if (newrule != NULL) 1570 free(newrule, M_PFRULE); 1571 if (kif != NULL) 1572 free(kif, 
PFI_MTYPE); 1573 break; 1574 } 1575 1576 case DIOCCLRSTATES: { 1577 struct pf_state *s; 1578 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1579 u_int i, killed = 0; 1580 1581 for (i = 0; i <= V_pf_hashmask; i++) { 1582 struct pf_idhash *ih = &V_pf_idhash[i]; 1583 1584relock_DIOCCLRSTATES: 1585 PF_HASHROW_LOCK(ih); 1586 LIST_FOREACH(s, &ih->states, entry) 1587 if (!psk->psk_ifname[0] || 1588 !strcmp(psk->psk_ifname, 1589 s->kif->pfik_name)) { 1590 /* 1591 * Don't send out individual 1592 * delete messages. 1593 */ 1594 s->state_flags |= PFSTATE_NOSYNC; 1595 pf_unlink_state(s, PF_ENTER_LOCKED); 1596 killed++; 1597 goto relock_DIOCCLRSTATES; 1598 } 1599 PF_HASHROW_UNLOCK(ih); 1600 } 1601 psk->psk_killed = killed; 1602 if (pfsync_clear_states_ptr != NULL) 1603 pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname); 1604 break; 1605 } 1606 1607 case DIOCKILLSTATES: { 1608 struct pf_state *s; 1609 struct pf_state_key *sk; 1610 struct pf_addr *srcaddr, *dstaddr; 1611 u_int16_t srcport, dstport; 1612 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1613 u_int i, killed = 0; 1614 1615 if (psk->psk_pfcmp.id) { 1616 if (psk->psk_pfcmp.creatorid == 0) 1617 psk->psk_pfcmp.creatorid = V_pf_status.hostid; 1618 if ((s = pf_find_state_byid(psk->psk_pfcmp.id, 1619 psk->psk_pfcmp.creatorid))) { 1620 pf_unlink_state(s, PF_ENTER_LOCKED); 1621 psk->psk_killed = 1; 1622 } 1623 break; 1624 } 1625 1626 for (i = 0; i <= V_pf_hashmask; i++) { 1627 struct pf_idhash *ih = &V_pf_idhash[i]; 1628 1629relock_DIOCKILLSTATES: 1630 PF_HASHROW_LOCK(ih); 1631 LIST_FOREACH(s, &ih->states, entry) { 1632 sk = s->key[PF_SK_WIRE]; 1633 if (s->direction == PF_OUT) { 1634 srcaddr = &sk->addr[1]; 1635 dstaddr = &sk->addr[0]; 1636 srcport = sk->port[0]; 1637 dstport = sk->port[0]; 1638 } else { 1639 srcaddr = &sk->addr[0]; 1640 dstaddr = &sk->addr[1]; 1641 srcport = sk->port[0]; 1642 dstport = sk->port[0]; 1643 } 1644 1645 if ((!psk->psk_af || sk->af == psk->psk_af) 1646 
&& (!psk->psk_proto || psk->psk_proto == 1647 sk->proto) && 1648 PF_MATCHA(psk->psk_src.neg, 1649 &psk->psk_src.addr.v.a.addr, 1650 &psk->psk_src.addr.v.a.mask, 1651 srcaddr, sk->af) && 1652 PF_MATCHA(psk->psk_dst.neg, 1653 &psk->psk_dst.addr.v.a.addr, 1654 &psk->psk_dst.addr.v.a.mask, 1655 dstaddr, sk->af) && 1656 (psk->psk_src.port_op == 0 || 1657 pf_match_port(psk->psk_src.port_op, 1658 psk->psk_src.port[0], psk->psk_src.port[1], 1659 srcport)) && 1660 (psk->psk_dst.port_op == 0 || 1661 pf_match_port(psk->psk_dst.port_op, 1662 psk->psk_dst.port[0], psk->psk_dst.port[1], 1663 dstport)) && 1664 (!psk->psk_label[0] || 1665 (s->rule.ptr->label[0] && 1666 !strcmp(psk->psk_label, 1667 s->rule.ptr->label))) && 1668 (!psk->psk_ifname[0] || 1669 !strcmp(psk->psk_ifname, 1670 s->kif->pfik_name))) { 1671 pf_unlink_state(s, PF_ENTER_LOCKED); 1672 killed++; 1673 goto relock_DIOCKILLSTATES; 1674 } 1675 } 1676 PF_HASHROW_UNLOCK(ih); 1677 } 1678 psk->psk_killed = killed; 1679 break; 1680 } 1681 1682 case DIOCADDSTATE: { 1683 struct pfioc_state *ps = (struct pfioc_state *)addr; 1684 struct pfsync_state *sp = &ps->state; 1685 1686 if (sp->timeout >= PFTM_MAX) { 1687 error = EINVAL; 1688 break; 1689 } 1690 if (pfsync_state_import_ptr != NULL) { 1691 PF_RULES_RLOCK(); 1692 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 1693 PF_RULES_RUNLOCK(); 1694 } else 1695 error = EOPNOTSUPP; 1696 break; 1697 } 1698 1699 case DIOCGETSTATE: { 1700 struct pfioc_state *ps = (struct pfioc_state *)addr; 1701 struct pf_state *s; 1702 1703 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 1704 if (s == NULL) { 1705 error = ENOENT; 1706 break; 1707 } 1708 1709 pfsync_state_export(&ps->state, s); 1710 PF_STATE_UNLOCK(s); 1711 break; 1712 } 1713 1714 case DIOCGETSTATES: { 1715 struct pfioc_states *ps = (struct pfioc_states *)addr; 1716 struct pf_state *s; 1717 struct pfsync_state *pstore, *p; 1718 int i, nr; 1719 1720 if (ps->ps_len == 0) { 1721 nr = uma_zone_get_cur(V_pf_state_z); 1722 
ps->ps_len = sizeof(struct pfsync_state) * nr; 1723 break; 1724 } 1725 1726 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK); 1727 nr = 0; 1728 1729 for (i = 0; i <= V_pf_hashmask; i++) { 1730 struct pf_idhash *ih = &V_pf_idhash[i]; 1731 1732 PF_HASHROW_LOCK(ih); 1733 LIST_FOREACH(s, &ih->states, entry) { 1734 1735 if (s->timeout == PFTM_UNLINKED) 1736 continue; 1737 1738 if ((nr+1) * sizeof(*p) > ps->ps_len) { 1739 PF_HASHROW_UNLOCK(ih); 1740 goto DIOCGETSTATES_full; 1741 } 1742 pfsync_state_export(p, s); 1743 p++; 1744 nr++; 1745 } 1746 PF_HASHROW_UNLOCK(ih); 1747 } 1748DIOCGETSTATES_full: 1749 error = copyout(pstore, ps->ps_states, 1750 sizeof(struct pfsync_state) * nr); 1751 if (error) { 1752 free(pstore, M_TEMP); 1753 break; 1754 } 1755 ps->ps_len = sizeof(struct pfsync_state) * nr; 1756 free(pstore, M_TEMP); 1757 1758 break; 1759 } 1760 1761 case DIOCGETSTATUS: { 1762 struct pf_status *s = (struct pf_status *)addr; 1763 PF_RULES_RLOCK(); 1764 bcopy(&V_pf_status, s, sizeof(struct pf_status)); 1765 pfi_update_status(s->ifname, s); 1766 PF_RULES_RUNLOCK(); 1767 break; 1768 } 1769 1770 case DIOCSETSTATUSIF: { 1771 struct pfioc_if *pi = (struct pfioc_if *)addr; 1772 1773 if (pi->ifname[0] == 0) { 1774 bzero(V_pf_status.ifname, IFNAMSIZ); 1775 break; 1776 } 1777 PF_RULES_WLOCK(); 1778 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 1779 PF_RULES_WUNLOCK(); 1780 break; 1781 } 1782 1783 case DIOCCLRSTATUS: { 1784 PF_RULES_WLOCK(); 1785 bzero(V_pf_status.counters, sizeof(V_pf_status.counters)); 1786 bzero(V_pf_status.fcounters, sizeof(V_pf_status.fcounters)); 1787 bzero(V_pf_status.scounters, sizeof(V_pf_status.scounters)); 1788 V_pf_status.since = time_second; 1789 if (*V_pf_status.ifname) 1790 pfi_update_status(V_pf_status.ifname, NULL); 1791 PF_RULES_WUNLOCK(); 1792 break; 1793 } 1794 1795 case DIOCNATLOOK: { 1796 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1797 struct pf_state_key *sk; 1798 struct pf_state *state; 1799 struct pf_state_key_cmp 
key; 1800 int m = 0, direction = pnl->direction; 1801 int sidx, didx; 1802 1803 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 1804 sidx = (direction == PF_IN) ? 1 : 0; 1805 didx = (direction == PF_IN) ? 0 : 1; 1806 1807 if (!pnl->proto || 1808 PF_AZERO(&pnl->saddr, pnl->af) || 1809 PF_AZERO(&pnl->daddr, pnl->af) || 1810 ((pnl->proto == IPPROTO_TCP || 1811 pnl->proto == IPPROTO_UDP) && 1812 (!pnl->dport || !pnl->sport))) 1813 error = EINVAL; 1814 else { 1815 bzero(&key, sizeof(key)); 1816 key.af = pnl->af; 1817 key.proto = pnl->proto; 1818 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 1819 key.port[sidx] = pnl->sport; 1820 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 1821 key.port[didx] = pnl->dport; 1822 1823 state = pf_find_state_all(&key, direction, &m); 1824 1825 if (m > 1) 1826 error = E2BIG; /* more than one state */ 1827 else if (state != NULL) { 1828 /* XXXGL: not locked read */ 1829 sk = state->key[sidx]; 1830 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 1831 pnl->rsport = sk->port[sidx]; 1832 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 1833 pnl->rdport = sk->port[didx]; 1834 } else 1835 error = ENOENT; 1836 } 1837 break; 1838 } 1839 1840 case DIOCSETTIMEOUT: { 1841 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1842 int old; 1843 1844 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1845 pt->seconds < 0) { 1846 error = EINVAL; 1847 break; 1848 } 1849 PF_RULES_WLOCK(); 1850 old = V_pf_default_rule.timeout[pt->timeout]; 1851 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1852 pt->seconds = 1; 1853 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 1854 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 1855 wakeup(pf_purge_thread); 1856 pt->seconds = old; 1857 PF_RULES_WUNLOCK(); 1858 break; 1859 } 1860 1861 case DIOCGETTIMEOUT: { 1862 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1863 1864 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1865 error = EINVAL; 1866 break; 1867 } 1868 PF_RULES_RLOCK(); 1869 
pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 1870 PF_RULES_RUNLOCK(); 1871 break; 1872 } 1873 1874 case DIOCGETLIMIT: { 1875 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1876 1877 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1878 error = EINVAL; 1879 break; 1880 } 1881 PF_RULES_RLOCK(); 1882 pl->limit = V_pf_limits[pl->index].limit; 1883 PF_RULES_RUNLOCK(); 1884 break; 1885 } 1886 1887 case DIOCSETLIMIT: { 1888 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1889 int old_limit; 1890 1891 PF_RULES_WLOCK(); 1892 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1893 V_pf_limits[pl->index].zone == NULL) { 1894 PF_RULES_WUNLOCK(); 1895 error = EINVAL; 1896 break; 1897 } 1898 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 1899 old_limit = V_pf_limits[pl->index].limit; 1900 V_pf_limits[pl->index].limit = pl->limit; 1901 pl->limit = old_limit; 1902 PF_RULES_WUNLOCK(); 1903 break; 1904 } 1905 1906 case DIOCSETDEBUG: { 1907 u_int32_t *level = (u_int32_t *)addr; 1908 1909 PF_RULES_WLOCK(); 1910 V_pf_status.debug = *level; 1911 PF_RULES_WUNLOCK(); 1912 break; 1913 } 1914 1915 case DIOCCLRRULECTRS: { 1916 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 1917 struct pf_ruleset *ruleset = &pf_main_ruleset; 1918 struct pf_rule *rule; 1919 1920 PF_RULES_WLOCK(); 1921 TAILQ_FOREACH(rule, 1922 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 1923 rule->evaluations = 0; 1924 rule->packets[0] = rule->packets[1] = 0; 1925 rule->bytes[0] = rule->bytes[1] = 0; 1926 } 1927 PF_RULES_WUNLOCK(); 1928 break; 1929 } 1930 1931 case DIOCGIFSPEED: { 1932 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 1933 struct pf_ifspeed ps; 1934 struct ifnet *ifp; 1935 1936 if (psp->ifname[0] != 0) { 1937 /* Can we completely trust user-land? 
*/ 1938 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 1939 ifp = ifunit(ps.ifname); 1940 if (ifp != NULL) 1941 psp->baudrate = ifp->if_baudrate; 1942 else 1943 error = EINVAL; 1944 } else 1945 error = EINVAL; 1946 break; 1947 } 1948 1949#ifdef ALTQ 1950 case DIOCSTARTALTQ: { 1951 struct pf_altq *altq; 1952 1953 PF_RULES_WLOCK(); 1954 /* enable all altq interfaces on active list */ 1955 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1956 if (altq->qname[0] == 0 && (altq->local_flags & 1957 PFALTQ_FLAG_IF_REMOVED) == 0) { 1958 error = pf_enable_altq(altq); 1959 if (error != 0) 1960 break; 1961 } 1962 } 1963 if (error == 0) 1964 V_pf_altq_running = 1; 1965 PF_RULES_WUNLOCK(); 1966 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 1967 break; 1968 } 1969 1970 case DIOCSTOPALTQ: { 1971 struct pf_altq *altq; 1972 1973 PF_RULES_WLOCK(); 1974 /* disable all altq interfaces on active list */ 1975 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1976 if (altq->qname[0] == 0 && (altq->local_flags & 1977 PFALTQ_FLAG_IF_REMOVED) == 0) { 1978 error = pf_disable_altq(altq); 1979 if (error != 0) 1980 break; 1981 } 1982 } 1983 if (error == 0) 1984 V_pf_altq_running = 0; 1985 PF_RULES_WUNLOCK(); 1986 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 1987 break; 1988 } 1989 1990 case DIOCADDALTQ: { 1991 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 1992 struct pf_altq *altq, *a; 1993 struct ifnet *ifp; 1994 1995 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK); 1996 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 1997 altq->local_flags = 0; 1998 1999 PF_RULES_WLOCK(); 2000 if (pa->ticket != V_ticket_altqs_inactive) { 2001 PF_RULES_WUNLOCK(); 2002 free(altq, M_PFALTQ); 2003 error = EBUSY; 2004 break; 2005 } 2006 2007 /* 2008 * if this is for a queue, find the discipline and 2009 * copy the necessary fields 2010 */ 2011 if (altq->qname[0] != 0) { 2012 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2013 PF_RULES_WUNLOCK(); 2014 error = EBUSY; 2015 free(altq, M_PFALTQ); 2016 break; 
2017 } 2018 altq->altq_disc = NULL; 2019 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) { 2020 if (strncmp(a->ifname, altq->ifname, 2021 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2022 altq->altq_disc = a->altq_disc; 2023 break; 2024 } 2025 } 2026 } 2027 2028 if ((ifp = ifunit(altq->ifname)) == NULL) 2029 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 2030 else 2031 error = altq_add(altq); 2032 2033 if (error) { 2034 PF_RULES_WUNLOCK(); 2035 free(altq, M_PFALTQ); 2036 break; 2037 } 2038 2039 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 2040 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2041 PF_RULES_WUNLOCK(); 2042 break; 2043 } 2044 2045 case DIOCGETALTQS: { 2046 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2047 struct pf_altq *altq; 2048 2049 PF_RULES_RLOCK(); 2050 pa->nr = 0; 2051 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 2052 pa->nr++; 2053 pa->ticket = V_ticket_altqs_active; 2054 PF_RULES_RUNLOCK(); 2055 break; 2056 } 2057 2058 case DIOCGETALTQ: { 2059 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2060 struct pf_altq *altq; 2061 u_int32_t nr; 2062 2063 PF_RULES_RLOCK(); 2064 if (pa->ticket != V_ticket_altqs_active) { 2065 PF_RULES_RUNLOCK(); 2066 error = EBUSY; 2067 break; 2068 } 2069 nr = 0; 2070 altq = TAILQ_FIRST(V_pf_altqs_active); 2071 while ((altq != NULL) && (nr < pa->nr)) { 2072 altq = TAILQ_NEXT(altq, entries); 2073 nr++; 2074 } 2075 if (altq == NULL) { 2076 PF_RULES_RUNLOCK(); 2077 error = EBUSY; 2078 break; 2079 } 2080 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2081 PF_RULES_RUNLOCK(); 2082 break; 2083 } 2084 2085 case DIOCCHANGEALTQ: 2086 /* CHANGEALTQ not supported yet! 
*/ 2087 error = ENODEV; 2088 break; 2089 2090 case DIOCGETQSTATS: { 2091 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2092 struct pf_altq *altq; 2093 u_int32_t nr; 2094 int nbytes; 2095 2096 PF_RULES_RLOCK(); 2097 if (pq->ticket != V_ticket_altqs_active) { 2098 PF_RULES_RUNLOCK(); 2099 error = EBUSY; 2100 break; 2101 } 2102 nbytes = pq->nbytes; 2103 nr = 0; 2104 altq = TAILQ_FIRST(V_pf_altqs_active); 2105 while ((altq != NULL) && (nr < pq->nr)) { 2106 altq = TAILQ_NEXT(altq, entries); 2107 nr++; 2108 } 2109 if (altq == NULL) { 2110 PF_RULES_RUNLOCK(); 2111 error = EBUSY; 2112 break; 2113 } 2114 2115 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2116 PF_RULES_RUNLOCK(); 2117 error = ENXIO; 2118 break; 2119 } 2120 PF_RULES_RUNLOCK(); 2121 error = altq_getqstats(altq, pq->buf, &nbytes); 2122 if (error == 0) { 2123 pq->scheduler = altq->scheduler; 2124 pq->nbytes = nbytes; 2125 } 2126 break; 2127 } 2128#endif /* ALTQ */ 2129 2130 case DIOCBEGINADDRS: { 2131 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2132 2133 PF_RULES_WLOCK(); 2134 pf_empty_pool(&V_pf_pabuf); 2135 pp->ticket = ++V_ticket_pabuf; 2136 PF_RULES_WUNLOCK(); 2137 break; 2138 } 2139 2140 case DIOCADDADDR: { 2141 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2142 struct pf_pooladdr *pa; 2143 struct pfi_kif *kif = NULL; 2144 2145#ifndef INET 2146 if (pp->af == AF_INET) { 2147 error = EAFNOSUPPORT; 2148 break; 2149 } 2150#endif /* INET */ 2151#ifndef INET6 2152 if (pp->af == AF_INET6) { 2153 error = EAFNOSUPPORT; 2154 break; 2155 } 2156#endif /* INET6 */ 2157 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2158 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2159 pp->addr.addr.type != PF_ADDR_TABLE) { 2160 error = EINVAL; 2161 break; 2162 } 2163 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 2164 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2165 if (pa->ifname[0]) 2166 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 2167 PF_RULES_WLOCK(); 2168 if (pp->ticket != 
V_ticket_pabuf) { 2169 PF_RULES_WUNLOCK(); 2170 if (pa->ifname[0]) 2171 free(kif, PFI_MTYPE); 2172 free(pa, M_PFRULE); 2173 error = EBUSY; 2174 break; 2175 } 2176 if (pa->ifname[0]) { 2177 pa->kif = pfi_kif_attach(kif, pa->ifname); 2178 pfi_kif_ref(pa->kif); 2179 } else 2180 pa->kif = NULL; 2181 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 2182 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 2183 if (pa->ifname[0]) 2184 pfi_kif_unref(pa->kif); 2185 PF_RULES_WUNLOCK(); 2186 free(pa, M_PFRULE); 2187 break; 2188 } 2189 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 2190 PF_RULES_WUNLOCK(); 2191 break; 2192 } 2193 2194 case DIOCGETADDRS: { 2195 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2196 struct pf_pool *pool; 2197 struct pf_pooladdr *pa; 2198 2199 PF_RULES_RLOCK(); 2200 pp->nr = 0; 2201 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2202 pp->r_num, 0, 1, 0); 2203 if (pool == NULL) { 2204 PF_RULES_RUNLOCK(); 2205 error = EBUSY; 2206 break; 2207 } 2208 TAILQ_FOREACH(pa, &pool->list, entries) 2209 pp->nr++; 2210 PF_RULES_RUNLOCK(); 2211 break; 2212 } 2213 2214 case DIOCGETADDR: { 2215 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2216 struct pf_pool *pool; 2217 struct pf_pooladdr *pa; 2218 u_int32_t nr = 0; 2219 2220 PF_RULES_RLOCK(); 2221 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2222 pp->r_num, 0, 1, 1); 2223 if (pool == NULL) { 2224 PF_RULES_RUNLOCK(); 2225 error = EBUSY; 2226 break; 2227 } 2228 pa = TAILQ_FIRST(&pool->list); 2229 while ((pa != NULL) && (nr < pp->nr)) { 2230 pa = TAILQ_NEXT(pa, entries); 2231 nr++; 2232 } 2233 if (pa == NULL) { 2234 PF_RULES_RUNLOCK(); 2235 error = EBUSY; 2236 break; 2237 } 2238 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2239 pf_addr_copyout(&pp->addr.addr); 2240 PF_RULES_RUNLOCK(); 2241 break; 2242 } 2243 2244 case DIOCCHANGEADDR: { 2245 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2246 struct pf_pool *pool; 2247 struct pf_pooladdr *oldpa = NULL, *newpa 
= NULL; 2248 struct pf_ruleset *ruleset; 2249 struct pfi_kif *kif = NULL; 2250 2251 if (pca->action < PF_CHANGE_ADD_HEAD || 2252 pca->action > PF_CHANGE_REMOVE) { 2253 error = EINVAL; 2254 break; 2255 } 2256 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2257 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2258 pca->addr.addr.type != PF_ADDR_TABLE) { 2259 error = EINVAL; 2260 break; 2261 } 2262 2263 if (pca->action != PF_CHANGE_REMOVE) { 2264#ifndef INET 2265 if (pca->af == AF_INET) { 2266 error = EAFNOSUPPORT; 2267 break; 2268 } 2269#endif /* INET */ 2270#ifndef INET6 2271 if (pca->af == AF_INET6) { 2272 error = EAFNOSUPPORT; 2273 break; 2274 } 2275#endif /* INET6 */ 2276 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 2277 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2278 if (newpa->ifname[0]) 2279 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 2280 newpa->kif = NULL; 2281 } 2282 2283#define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; } 2284 PF_RULES_WLOCK(); 2285 ruleset = pf_find_ruleset(pca->anchor); 2286 if (ruleset == NULL) 2287 ERROUT(EBUSY); 2288 2289 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2290 pca->r_num, pca->r_last, 1, 1); 2291 if (pool == NULL) 2292 ERROUT(EBUSY); 2293 2294 if (pca->action != PF_CHANGE_REMOVE) { 2295 if (newpa->ifname[0]) { 2296 newpa->kif = pfi_kif_attach(kif, newpa->ifname); 2297 pfi_kif_ref(newpa->kif); 2298 kif = NULL; 2299 } 2300 2301 switch (newpa->addr.type) { 2302 case PF_ADDR_DYNIFTL: 2303 error = pfi_dynaddr_setup(&newpa->addr, 2304 pca->af); 2305 break; 2306 case PF_ADDR_TABLE: 2307 newpa->addr.p.tbl = pfr_attach_table(ruleset, 2308 newpa->addr.v.tblname); 2309 if (newpa->addr.p.tbl == NULL) 2310 error = ENOMEM; 2311 break; 2312 } 2313 if (error) 2314 goto DIOCCHANGEADDR_error; 2315 } 2316 2317 switch (pca->action) { 2318 case PF_CHANGE_ADD_HEAD: 2319 oldpa = TAILQ_FIRST(&pool->list); 2320 break; 2321 case PF_CHANGE_ADD_TAIL: 2322 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2323 break; 
2324 default: 2325 oldpa = TAILQ_FIRST(&pool->list); 2326 for (int i = 0; oldpa && i < pca->nr; i++) 2327 oldpa = TAILQ_NEXT(oldpa, entries); 2328 2329 if (oldpa == NULL) 2330 ERROUT(EINVAL); 2331 } 2332 2333 if (pca->action == PF_CHANGE_REMOVE) { 2334 TAILQ_REMOVE(&pool->list, oldpa, entries); 2335 switch (oldpa->addr.type) { 2336 case PF_ADDR_DYNIFTL: 2337 pfi_dynaddr_remove(oldpa->addr.p.dyn); 2338 break; 2339 case PF_ADDR_TABLE: 2340 pfr_detach_table(oldpa->addr.p.tbl); 2341 break; 2342 } 2343 if (oldpa->kif) 2344 pfi_kif_unref(oldpa->kif); 2345 free(oldpa, M_PFRULE); 2346 } else { 2347 if (oldpa == NULL) 2348 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2349 else if (pca->action == PF_CHANGE_ADD_HEAD || 2350 pca->action == PF_CHANGE_ADD_BEFORE) 2351 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2352 else 2353 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2354 newpa, entries); 2355 } 2356 2357 pool->cur = TAILQ_FIRST(&pool->list); 2358 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 2359 PF_RULES_WUNLOCK(); 2360 break; 2361 2362#undef ERROUT 2363DIOCCHANGEADDR_error: 2364 if (newpa->kif) 2365 pfi_kif_unref(newpa->kif); 2366 PF_RULES_WUNLOCK(); 2367 if (newpa != NULL) 2368 free(newpa, M_PFRULE); 2369 if (kif != NULL) 2370 free(kif, PFI_MTYPE); 2371 break; 2372 } 2373 2374 case DIOCGETRULESETS: { 2375 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2376 struct pf_ruleset *ruleset; 2377 struct pf_anchor *anchor; 2378 2379 PF_RULES_RLOCK(); 2380 pr->path[sizeof(pr->path) - 1] = 0; 2381 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2382 PF_RULES_RUNLOCK(); 2383 error = ENOENT; 2384 break; 2385 } 2386 pr->nr = 0; 2387 if (ruleset->anchor == NULL) { 2388 /* XXX kludge for pf_main_ruleset */ 2389 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 2390 if (anchor->parent == NULL) 2391 pr->nr++; 2392 } else { 2393 RB_FOREACH(anchor, pf_anchor_node, 2394 &ruleset->anchor->children) 2395 pr->nr++; 2396 } 2397 PF_RULES_RUNLOCK(); 2398 break; 2399 } 
2400 2401 case DIOCGETRULESET: { 2402 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2403 struct pf_ruleset *ruleset; 2404 struct pf_anchor *anchor; 2405 u_int32_t nr = 0; 2406 2407 PF_RULES_RLOCK(); 2408 pr->path[sizeof(pr->path) - 1] = 0; 2409 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2410 PF_RULES_RUNLOCK(); 2411 error = ENOENT; 2412 break; 2413 } 2414 pr->name[0] = 0; 2415 if (ruleset->anchor == NULL) { 2416 /* XXX kludge for pf_main_ruleset */ 2417 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 2418 if (anchor->parent == NULL && nr++ == pr->nr) { 2419 strlcpy(pr->name, anchor->name, 2420 sizeof(pr->name)); 2421 break; 2422 } 2423 } else { 2424 RB_FOREACH(anchor, pf_anchor_node, 2425 &ruleset->anchor->children) 2426 if (nr++ == pr->nr) { 2427 strlcpy(pr->name, anchor->name, 2428 sizeof(pr->name)); 2429 break; 2430 } 2431 } 2432 if (!pr->name[0]) 2433 error = EBUSY; 2434 PF_RULES_RUNLOCK(); 2435 break; 2436 } 2437 2438 case DIOCRCLRTABLES: { 2439 struct pfioc_table *io = (struct pfioc_table *)addr; 2440 2441 if (io->pfrio_esize != 0) { 2442 error = ENODEV; 2443 break; 2444 } 2445 PF_RULES_WLOCK(); 2446 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2447 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2448 PF_RULES_WUNLOCK(); 2449 break; 2450 } 2451 2452 case DIOCRADDTABLES: { 2453 struct pfioc_table *io = (struct pfioc_table *)addr; 2454 struct pfr_table *pfrts; 2455 size_t totlen; 2456 2457 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2458 error = ENODEV; 2459 break; 2460 } 2461 totlen = io->pfrio_size * sizeof(struct pfr_table); 2462 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2463 error = copyin(io->pfrio_buffer, pfrts, totlen); 2464 if (error) { 2465 free(pfrts, M_TEMP); 2466 break; 2467 } 2468 PF_RULES_WLOCK(); 2469 error = pfr_add_tables(pfrts, io->pfrio_size, 2470 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2471 PF_RULES_WUNLOCK(); 2472 free(pfrts, M_TEMP); 2473 break; 2474 } 2475 2476 case DIOCRDELTABLES: { 
2477 struct pfioc_table *io = (struct pfioc_table *)addr; 2478 struct pfr_table *pfrts; 2479 size_t totlen; 2480 2481 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2482 error = ENODEV; 2483 break; 2484 } 2485 totlen = io->pfrio_size * sizeof(struct pfr_table); 2486 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2487 error = copyin(io->pfrio_buffer, pfrts, totlen); 2488 if (error) { 2489 free(pfrts, M_TEMP); 2490 break; 2491 } 2492 PF_RULES_WLOCK(); 2493 error = pfr_del_tables(pfrts, io->pfrio_size, 2494 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2495 PF_RULES_WUNLOCK(); 2496 free(pfrts, M_TEMP); 2497 break; 2498 } 2499 2500 case DIOCRGETTABLES: { 2501 struct pfioc_table *io = (struct pfioc_table *)addr; 2502 struct pfr_table *pfrts; 2503 size_t totlen; 2504 2505 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2506 error = ENODEV; 2507 break; 2508 } 2509 totlen = io->pfrio_size * sizeof(struct pfr_table); 2510 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2511 PF_RULES_RLOCK(); 2512 error = pfr_get_tables(&io->pfrio_table, pfrts, 2513 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2514 PF_RULES_RUNLOCK(); 2515 if (error == 0) 2516 error = copyout(pfrts, io->pfrio_buffer, totlen); 2517 free(pfrts, M_TEMP); 2518 break; 2519 } 2520 2521 case DIOCRGETTSTATS: { 2522 struct pfioc_table *io = (struct pfioc_table *)addr; 2523 struct pfr_tstats *pfrtstats; 2524 size_t totlen; 2525 2526 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2527 error = ENODEV; 2528 break; 2529 } 2530 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 2531 pfrtstats = malloc(totlen, M_TEMP, M_WAITOK); 2532 PF_RULES_WLOCK(); 2533 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 2534 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2535 PF_RULES_WUNLOCK(); 2536 if (error == 0) 2537 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 2538 free(pfrtstats, M_TEMP); 2539 break; 2540 } 2541 2542 case DIOCRCLRTSTATS: { 2543 struct pfioc_table *io = (struct pfioc_table 
*)addr; 2544 struct pfr_table *pfrts; 2545 size_t totlen; 2546 2547 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2548 error = ENODEV; 2549 break; 2550 } 2551 totlen = io->pfrio_size * sizeof(struct pfr_table); 2552 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2553 error = copyin(io->pfrio_buffer, pfrts, totlen); 2554 if (error) { 2555 free(pfrts, M_TEMP); 2556 break; 2557 } 2558 PF_RULES_WLOCK(); 2559 error = pfr_clr_tstats(pfrts, io->pfrio_size, 2560 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2561 PF_RULES_WUNLOCK(); 2562 free(pfrts, M_TEMP); 2563 break; 2564 } 2565 2566 case DIOCRSETTFLAGS: { 2567 struct pfioc_table *io = (struct pfioc_table *)addr; 2568 struct pfr_table *pfrts; 2569 size_t totlen; 2570 2571 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2572 error = ENODEV; 2573 break; 2574 } 2575 totlen = io->pfrio_size * sizeof(struct pfr_table); 2576 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2577 error = copyin(io->pfrio_buffer, pfrts, totlen); 2578 if (error) { 2579 free(pfrts, M_TEMP); 2580 break; 2581 } 2582 PF_RULES_WLOCK(); 2583 error = pfr_set_tflags(pfrts, io->pfrio_size, 2584 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2585 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2586 PF_RULES_WUNLOCK(); 2587 free(pfrts, M_TEMP); 2588 break; 2589 } 2590 2591 case DIOCRCLRADDRS: { 2592 struct pfioc_table *io = (struct pfioc_table *)addr; 2593 2594 if (io->pfrio_esize != 0) { 2595 error = ENODEV; 2596 break; 2597 } 2598 PF_RULES_WLOCK(); 2599 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2600 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2601 PF_RULES_WUNLOCK(); 2602 break; 2603 } 2604 2605 case DIOCRADDADDRS: { 2606 struct pfioc_table *io = (struct pfioc_table *)addr; 2607 struct pfr_addr *pfras; 2608 size_t totlen; 2609 2610 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2611 error = ENODEV; 2612 break; 2613 } 2614 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2615 pfras = malloc(totlen, M_TEMP, M_WAITOK); 
2616 error = copyin(io->pfrio_buffer, pfras, totlen); 2617 if (error) { 2618 free(pfras, M_TEMP); 2619 break; 2620 } 2621 PF_RULES_WLOCK(); 2622 error = pfr_add_addrs(&io->pfrio_table, pfras, 2623 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2624 PFR_FLAG_USERIOCTL); 2625 PF_RULES_WUNLOCK(); 2626 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2627 error = copyout(pfras, io->pfrio_buffer, totlen); 2628 free(pfras, M_TEMP); 2629 break; 2630 } 2631 2632 case DIOCRDELADDRS: { 2633 struct pfioc_table *io = (struct pfioc_table *)addr; 2634 struct pfr_addr *pfras; 2635 size_t totlen; 2636 2637 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2638 error = ENODEV; 2639 break; 2640 } 2641 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2642 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2643 error = copyin(io->pfrio_buffer, pfras, totlen); 2644 if (error) { 2645 free(pfras, M_TEMP); 2646 break; 2647 } 2648 PF_RULES_WLOCK(); 2649 error = pfr_del_addrs(&io->pfrio_table, pfras, 2650 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2651 PFR_FLAG_USERIOCTL); 2652 PF_RULES_WUNLOCK(); 2653 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2654 error = copyout(pfras, io->pfrio_buffer, totlen); 2655 free(pfras, M_TEMP); 2656 break; 2657 } 2658 2659 case DIOCRSETADDRS: { 2660 struct pfioc_table *io = (struct pfioc_table *)addr; 2661 struct pfr_addr *pfras; 2662 size_t totlen; 2663 2664 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2665 error = ENODEV; 2666 break; 2667 } 2668 totlen = (io->pfrio_size + io->pfrio_size2) * 2669 sizeof(struct pfr_addr); 2670 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2671 error = copyin(io->pfrio_buffer, pfras, totlen); 2672 if (error) { 2673 free(pfras, M_TEMP); 2674 break; 2675 } 2676 PF_RULES_WLOCK(); 2677 error = pfr_set_addrs(&io->pfrio_table, pfras, 2678 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2679 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2680 PFR_FLAG_USERIOCTL, 0); 2681 PF_RULES_WUNLOCK(); 2682 if 
(error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2683 error = copyout(pfras, io->pfrio_buffer, totlen); 2684 free(pfras, M_TEMP); 2685 break; 2686 } 2687 2688 case DIOCRGETADDRS: { 2689 struct pfioc_table *io = (struct pfioc_table *)addr; 2690 struct pfr_addr *pfras; 2691 size_t totlen; 2692 2693 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2694 error = ENODEV; 2695 break; 2696 } 2697 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2698 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2699 PF_RULES_RLOCK(); 2700 error = pfr_get_addrs(&io->pfrio_table, pfras, 2701 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2702 PF_RULES_RUNLOCK(); 2703 if (error == 0) 2704 error = copyout(pfras, io->pfrio_buffer, totlen); 2705 free(pfras, M_TEMP); 2706 break; 2707 } 2708 2709 case DIOCRGETASTATS: { 2710 struct pfioc_table *io = (struct pfioc_table *)addr; 2711 struct pfr_astats *pfrastats; 2712 size_t totlen; 2713 2714 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2715 error = ENODEV; 2716 break; 2717 } 2718 totlen = io->pfrio_size * sizeof(struct pfr_astats); 2719 pfrastats = malloc(totlen, M_TEMP, M_WAITOK); 2720 PF_RULES_RLOCK(); 2721 error = pfr_get_astats(&io->pfrio_table, pfrastats, 2722 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2723 PF_RULES_RUNLOCK(); 2724 if (error == 0) 2725 error = copyout(pfrastats, io->pfrio_buffer, totlen); 2726 free(pfrastats, M_TEMP); 2727 break; 2728 } 2729 2730 case DIOCRCLRASTATS: { 2731 struct pfioc_table *io = (struct pfioc_table *)addr; 2732 struct pfr_addr *pfras; 2733 size_t totlen; 2734 2735 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2736 error = ENODEV; 2737 break; 2738 } 2739 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2740 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2741 error = copyin(io->pfrio_buffer, pfras, totlen); 2742 if (error) { 2743 free(pfras, M_TEMP); 2744 break; 2745 } 2746 PF_RULES_WLOCK(); 2747 error = pfr_clr_astats(&io->pfrio_table, pfras, 2748 io->pfrio_size, 
&io->pfrio_nzero, io->pfrio_flags | 2749 PFR_FLAG_USERIOCTL); 2750 PF_RULES_WUNLOCK(); 2751 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2752 error = copyout(pfras, io->pfrio_buffer, totlen); 2753 free(pfras, M_TEMP); 2754 break; 2755 } 2756 2757 case DIOCRTSTADDRS: { 2758 struct pfioc_table *io = (struct pfioc_table *)addr; 2759 struct pfr_addr *pfras; 2760 size_t totlen; 2761 2762 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2763 error = ENODEV; 2764 break; 2765 } 2766 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2767 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2768 error = copyin(io->pfrio_buffer, pfras, totlen); 2769 if (error) { 2770 free(pfras, M_TEMP); 2771 break; 2772 } 2773 PF_RULES_RLOCK(); 2774 error = pfr_tst_addrs(&io->pfrio_table, pfras, 2775 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2776 PFR_FLAG_USERIOCTL); 2777 PF_RULES_RUNLOCK(); 2778 if (error == 0) 2779 error = copyout(pfras, io->pfrio_buffer, totlen); 2780 free(pfras, M_TEMP); 2781 break; 2782 } 2783 2784 case DIOCRINADEFINE: { 2785 struct pfioc_table *io = (struct pfioc_table *)addr; 2786 struct pfr_addr *pfras; 2787 size_t totlen; 2788 2789 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2790 error = ENODEV; 2791 break; 2792 } 2793 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2794 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2795 error = copyin(io->pfrio_buffer, pfras, totlen); 2796 if (error) { 2797 free(pfras, M_TEMP); 2798 break; 2799 } 2800 PF_RULES_WLOCK(); 2801 error = pfr_ina_define(&io->pfrio_table, pfras, 2802 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2803 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2804 PF_RULES_WUNLOCK(); 2805 free(pfras, M_TEMP); 2806 break; 2807 } 2808 2809 case DIOCOSFPADD: { 2810 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2811 PF_RULES_WLOCK(); 2812 error = pf_osfp_add(io); 2813 PF_RULES_WUNLOCK(); 2814 break; 2815 } 2816 2817 case DIOCOSFPGET: { 2818 struct pf_osfp_ioctl *io = (struct 
pf_osfp_ioctl *)addr; 2819 PF_RULES_RLOCK(); 2820 error = pf_osfp_get(io); 2821 PF_RULES_RUNLOCK(); 2822 break; 2823 } 2824 2825 case DIOCXBEGIN: { 2826 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2827 struct pfioc_trans_e *ioes, *ioe; 2828 size_t totlen; 2829 int i; 2830 2831 if (io->esize != sizeof(*ioe)) { 2832 error = ENODEV; 2833 break; 2834 } 2835 totlen = sizeof(struct pfioc_trans_e) * io->size; 2836 ioes = malloc(totlen, M_TEMP, M_WAITOK); 2837 error = copyin(io->array, ioes, totlen); 2838 if (error) { 2839 free(ioes, M_TEMP); 2840 break; 2841 } 2842 PF_RULES_WLOCK(); 2843 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 2844 switch (ioe->rs_num) { 2845#ifdef ALTQ 2846 case PF_RULESET_ALTQ: 2847 if (ioe->anchor[0]) { 2848 PF_RULES_WUNLOCK(); 2849 free(ioes, M_TEMP); 2850 error = EINVAL; 2851 goto fail; 2852 } 2853 if ((error = pf_begin_altq(&ioe->ticket))) { 2854 PF_RULES_WUNLOCK(); 2855 free(ioes, M_TEMP); 2856 goto fail; 2857 } 2858 break; 2859#endif /* ALTQ */ 2860 case PF_RULESET_TABLE: 2861 { 2862 struct pfr_table table; 2863 2864 bzero(&table, sizeof(table)); 2865 strlcpy(table.pfrt_anchor, ioe->anchor, 2866 sizeof(table.pfrt_anchor)); 2867 if ((error = pfr_ina_begin(&table, 2868 &ioe->ticket, NULL, 0))) { 2869 PF_RULES_WUNLOCK(); 2870 free(ioes, M_TEMP); 2871 goto fail; 2872 } 2873 break; 2874 } 2875 default: 2876 if ((error = pf_begin_rules(&ioe->ticket, 2877 ioe->rs_num, ioe->anchor))) { 2878 PF_RULES_WUNLOCK(); 2879 free(ioes, M_TEMP); 2880 goto fail; 2881 } 2882 break; 2883 } 2884 } 2885 PF_RULES_WUNLOCK(); 2886 error = copyout(ioes, io->array, totlen); 2887 free(ioes, M_TEMP); 2888 break; 2889 } 2890 2891 case DIOCXROLLBACK: { 2892 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2893 struct pfioc_trans_e *ioe, *ioes; 2894 size_t totlen; 2895 int i; 2896 2897 if (io->esize != sizeof(*ioe)) { 2898 error = ENODEV; 2899 break; 2900 } 2901 totlen = sizeof(struct pfioc_trans_e) * io->size; 2902 ioes = malloc(totlen, M_TEMP, M_WAITOK); 
2903 error = copyin(io->array, ioes, totlen); 2904 if (error) { 2905 free(ioes, M_TEMP); 2906 break; 2907 } 2908 PF_RULES_WLOCK(); 2909 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 2910 switch (ioe->rs_num) { 2911#ifdef ALTQ 2912 case PF_RULESET_ALTQ: 2913 if (ioe->anchor[0]) { 2914 PF_RULES_WUNLOCK(); 2915 free(ioes, M_TEMP); 2916 error = EINVAL; 2917 goto fail; 2918 } 2919 if ((error = pf_rollback_altq(ioe->ticket))) { 2920 PF_RULES_WUNLOCK(); 2921 free(ioes, M_TEMP); 2922 goto fail; /* really bad */ 2923 } 2924 break; 2925#endif /* ALTQ */ 2926 case PF_RULESET_TABLE: 2927 { 2928 struct pfr_table table; 2929 2930 bzero(&table, sizeof(table)); 2931 strlcpy(table.pfrt_anchor, ioe->anchor, 2932 sizeof(table.pfrt_anchor)); 2933 if ((error = pfr_ina_rollback(&table, 2934 ioe->ticket, NULL, 0))) { 2935 PF_RULES_WUNLOCK(); 2936 free(ioes, M_TEMP); 2937 goto fail; /* really bad */ 2938 } 2939 break; 2940 } 2941 default: 2942 if ((error = pf_rollback_rules(ioe->ticket, 2943 ioe->rs_num, ioe->anchor))) { 2944 PF_RULES_WUNLOCK(); 2945 free(ioes, M_TEMP); 2946 goto fail; /* really bad */ 2947 } 2948 break; 2949 } 2950 } 2951 PF_RULES_WUNLOCK(); 2952 free(ioes, M_TEMP); 2953 break; 2954 } 2955 2956 case DIOCXCOMMIT: { 2957 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2958 struct pfioc_trans_e *ioe, *ioes; 2959 struct pf_ruleset *rs; 2960 size_t totlen; 2961 int i; 2962 2963 if (io->esize != sizeof(*ioe)) { 2964 error = ENODEV; 2965 break; 2966 } 2967 totlen = sizeof(struct pfioc_trans_e) * io->size; 2968 ioes = malloc(totlen, M_TEMP, M_WAITOK); 2969 error = copyin(io->array, ioes, totlen); 2970 if (error) { 2971 free(ioes, M_TEMP); 2972 break; 2973 } 2974 PF_RULES_WLOCK(); 2975 /* First makes sure everything will succeed. 
*/ 2976 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 2977 switch (ioe->rs_num) { 2978#ifdef ALTQ 2979 case PF_RULESET_ALTQ: 2980 if (ioe->anchor[0]) { 2981 PF_RULES_WUNLOCK(); 2982 free(ioes, M_TEMP); 2983 error = EINVAL; 2984 goto fail; 2985 } 2986 if (!V_altqs_inactive_open || ioe->ticket != 2987 V_ticket_altqs_inactive) { 2988 PF_RULES_WUNLOCK(); 2989 free(ioes, M_TEMP); 2990 error = EBUSY; 2991 goto fail; 2992 } 2993 break; 2994#endif /* ALTQ */ 2995 case PF_RULESET_TABLE: 2996 rs = pf_find_ruleset(ioe->anchor); 2997 if (rs == NULL || !rs->topen || ioe->ticket != 2998 rs->tticket) { 2999 PF_RULES_WUNLOCK(); 3000 free(ioes, M_TEMP); 3001 error = EBUSY; 3002 goto fail; 3003 } 3004 break; 3005 default: 3006 if (ioe->rs_num < 0 || ioe->rs_num >= 3007 PF_RULESET_MAX) { 3008 PF_RULES_WUNLOCK(); 3009 free(ioes, M_TEMP); 3010 error = EINVAL; 3011 goto fail; 3012 } 3013 rs = pf_find_ruleset(ioe->anchor); 3014 if (rs == NULL || 3015 !rs->rules[ioe->rs_num].inactive.open || 3016 rs->rules[ioe->rs_num].inactive.ticket != 3017 ioe->ticket) { 3018 PF_RULES_WUNLOCK(); 3019 free(ioes, M_TEMP); 3020 error = EBUSY; 3021 goto fail; 3022 } 3023 break; 3024 } 3025 } 3026 /* Now do the commit - no errors should happen here. 
*/ 3027 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 3028 switch (ioe->rs_num) { 3029#ifdef ALTQ 3030 case PF_RULESET_ALTQ: 3031 if ((error = pf_commit_altq(ioe->ticket))) { 3032 PF_RULES_WUNLOCK(); 3033 free(ioes, M_TEMP); 3034 goto fail; /* really bad */ 3035 } 3036 break; 3037#endif /* ALTQ */ 3038 case PF_RULESET_TABLE: 3039 { 3040 struct pfr_table table; 3041 3042 bzero(&table, sizeof(table)); 3043 strlcpy(table.pfrt_anchor, ioe->anchor, 3044 sizeof(table.pfrt_anchor)); 3045 if ((error = pfr_ina_commit(&table, 3046 ioe->ticket, NULL, NULL, 0))) { 3047 PF_RULES_WUNLOCK(); 3048 free(ioes, M_TEMP); 3049 goto fail; /* really bad */ 3050 } 3051 break; 3052 } 3053 default: 3054 if ((error = pf_commit_rules(ioe->ticket, 3055 ioe->rs_num, ioe->anchor))) { 3056 PF_RULES_WUNLOCK(); 3057 free(ioes, M_TEMP); 3058 goto fail; /* really bad */ 3059 } 3060 break; 3061 } 3062 } 3063 PF_RULES_WUNLOCK(); 3064 free(ioes, M_TEMP); 3065 break; 3066 } 3067 3068 case DIOCGETSRCNODES: { 3069 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3070 struct pf_srchash *sh; 3071 struct pf_src_node *n, *p, *pstore; 3072 uint32_t i, nr = 0; 3073 3074 if (psn->psn_len == 0) { 3075 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; 3076 i++, sh++) { 3077 PF_HASHROW_LOCK(sh); 3078 LIST_FOREACH(n, &sh->nodes, entry) 3079 nr++; 3080 PF_HASHROW_UNLOCK(sh); 3081 } 3082 psn->psn_len = sizeof(struct pf_src_node) * nr; 3083 break; 3084 } 3085 3086 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK); 3087 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; 3088 i++, sh++) { 3089 PF_HASHROW_LOCK(sh); 3090 LIST_FOREACH(n, &sh->nodes, entry) { 3091 int secs = time_uptime, diff; 3092 3093 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3094 break; 3095 3096 bcopy(n, p, sizeof(struct pf_src_node)); 3097 if (n->rule.ptr != NULL) 3098 p->rule.nr = n->rule.ptr->nr; 3099 p->creation = secs - p->creation; 3100 if (p->expire > secs) 3101 p->expire -= secs; 3102 else 3103 p->expire = 0; 
3104 3105 /* Adjust the connection rate estimate. */ 3106 diff = secs - n->conn_rate.last; 3107 if (diff >= n->conn_rate.seconds) 3108 p->conn_rate.count = 0; 3109 else 3110 p->conn_rate.count -= 3111 n->conn_rate.count * diff / 3112 n->conn_rate.seconds; 3113 p++; 3114 nr++; 3115 } 3116 PF_HASHROW_UNLOCK(sh); 3117 } 3118 error = copyout(pstore, psn->psn_src_nodes, 3119 sizeof(struct pf_src_node) * nr); 3120 if (error) { 3121 free(pstore, M_TEMP); 3122 break; 3123 } 3124 psn->psn_len = sizeof(struct pf_src_node) * nr; 3125 free(pstore, M_TEMP); 3126 break; 3127 } 3128 3129 case DIOCCLRSRCNODES: { 3130 3131 pf_clear_srcnodes(NULL); 3132 pf_purge_expired_src_nodes(); 3133 V_pf_status.src_nodes = 0; 3134 break; 3135 } 3136 3137 case DIOCKILLSRCNODES: 3138 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 3139 break; 3140 3141 case DIOCSETHOSTID: { 3142 u_int32_t *hostid = (u_int32_t *)addr; 3143 3144 PF_RULES_WLOCK(); 3145 if (*hostid == 0) 3146 V_pf_status.hostid = arc4random(); 3147 else 3148 V_pf_status.hostid = *hostid; 3149 PF_RULES_WUNLOCK(); 3150 break; 3151 } 3152 3153 case DIOCOSFPFLUSH: 3154 PF_RULES_WLOCK(); 3155 pf_osfp_flush(); 3156 PF_RULES_WUNLOCK(); 3157 break; 3158 3159 case DIOCIGETIFACES: { 3160 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3161 struct pfi_kif *ifstore; 3162 size_t bufsiz; 3163 3164 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 3165 error = ENODEV; 3166 break; 3167 } 3168 3169 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 3170 ifstore = malloc(bufsiz, M_TEMP, M_WAITOK); 3171 PF_RULES_RLOCK(); 3172 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 3173 PF_RULES_RUNLOCK(); 3174 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 3175 free(ifstore, M_TEMP); 3176 break; 3177 } 3178 3179 case DIOCSETIFFLAG: { 3180 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3181 3182 PF_RULES_WLOCK(); 3183 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3184 PF_RULES_WUNLOCK(); 3185 break; 3186 } 3187 3188 case 
DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	CURVNET_RESTORE();

	return (error);
}

/*
 * Export a kernel pf_state into the wire format (struct pfsync_state)
 * shared with pfsync and userland.  Multi-byte fields are converted to
 * network byte order; absolute kernel timestamps are turned into
 * relative "seconds ago"/"seconds left" values.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	/* Convert absolute expiry time into remaining seconds (0 if past). */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* Rule pointers are exported as rule numbers; -1 means "none". */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}

/*
 * Replace the kernel table pointer in an address wrapper with the
 * table's address count before the rule is copied out to userland.
 * An inactive table reports the counters of its root table, or -1 if
 * neither is active.
 */
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Mark every state for purge and unlink it from the id hash, without
 * generating individual pfsync delete messages.
 */
static void
pf_clear_states(void)
{
	struct pf_state *s;
	u_int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->sync_state = PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			/*
			 * The row lock is no longer held after the unlink;
			 * restart the scan of this row from the top.
			 */
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

/*
 * Flush all tables; equivalent to a DIOCRCLRTABLES with a zeroed
 * (match-everything) request.
 */
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

/*
 * Drop all state references to source node n and mark it expired with
 * no states, so the purge thread can reclaim it.  With n == NULL this
 * is done for every source node in the hash.
 */
static void
pf_clear_srcnodes(struct pf_src_node *n)
{
	struct pf_state *s;
	int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

/*
 * DIOCKILLSRCNODES backend: unlink every source node whose source and
 * destination addresses match the filter in psnk, strip references to
 * the collected nodes from all states, then free them and report the
 * number killed in psnk->psnk_killed.
 */
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_src_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= V_pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_src_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node_locked(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				/* Mark it so the state scan below drops it. */
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1) {
#ifdef INVARIANTS
				s->src_node->states--;
#endif
				s->src_node = NULL;
			}
			if (s->nat_src_node && s->nat_src_node->expire == 1) {
#ifdef INVARIANTS
				s->nat_src_node->states--;
#endif
				s->nat_src_node = NULL;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	V_pf_status.running = 0;
	do {
		/*
		 * Replace each active ruleset with an empty one by opening
		 * a transaction ticket per ruleset type and committing it
		 * without adding any rules.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* Commit an empty ALTQ configuration as well. */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

#ifdef INET
/*
 * pfil(9) input hook for IPv4: run the packet through pf_test() and
 * free the mbuf if the verdict is anything but pass.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
	struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}

/*
 * pfil(9) output hook for IPv4.  Delayed transport checksums are
 * finalized first so pf inspects/modifies a correct packet.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
	struct inpcb *inp)
{
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}

	chk = pf_test(PF_OUT, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}
#endif

#ifdef INET6
/*
 * pfil(9) input hook for IPv6.
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
	struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we have to change this to lo0 as it is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for IPv6.
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
	struct inpcb *inp)
{
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
#ifdef INET
		/* XXX-BZ copy&paste error from r126261? */
		in_delayed_cksum(*m);
#endif
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register pf's pfil(9) hooks for the configured address families.
 * Idempotent: returns 0 immediately if already hooked.  On a partial
 * IPv6 failure the IPv4 hooks are removed again before returning.
 */
static int
hook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
#ifdef INET
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	V_pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister pf's pfil(9) hooks.  Idempotent counterpart of hook_pf().
 */
static int
dehook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked == 0)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	V_pf_pfil_hooked = 0;
	return (0);
}

/*
 * MOD_LOAD handler: initialize per-vnet state, the global rules lock
 * and the /dev/pf device node, then attach pf proper.
 */
static int
pf_load(void)
{
	int error;

	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pf_pfil_hooked = 0;
		V_pf_end_threads = 0;
		TAILQ_INIT(&V_pf_tags);
		TAILQ_INIT(&V_pf_qids);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	rw_init(&pf_rules_lock, "pf rulesets");

	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if ((error = pfattach()) != 0)
		return (error);

	return (0);
}

/*
 * MOD_UNLOAD handler: stop pf, detach the pfil hooks, tear down all
 * rulesets/states, wait for the purge thread to exit, then release
 * the remaining subsystems and the device node.
 */
static int
pf_unload(void)
{
	int error = 0;

	PF_RULES_WLOCK();
	V_pf_status.running = 0;
	PF_RULES_WUNLOCK();
	swi_remove(V_pf_swi_cookie);
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return error;
	}
	PF_RULES_WLOCK();
	shutdown_pf();
	/*
	 * Ask the purge thread to terminate and sleep until it bumps
	 * V_pf_end_threads to 2 to acknowledge.
	 */
	V_pf_end_threads = 1;
	while (V_pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
	}
	pf_normalize_cleanup();
	pfi_cleanup();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	PF_RULES_WUNLOCK();
	destroy_dev(pf_dev);
	rw_destroy(&pf_rules_lock);

	return (error);
}

/*
 * module(9) event handler for the pf kernel module.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);