ip_fw_sockopt.c revision 346205
1/*- 2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 3 * Copyright (c) 2014 Yandex LLC 4 * Copyright (c) 2014 Alexander V. Chernikov 5 * 6 * Supported by: Valeria Paoli 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/11/sys/netpfil/ipfw/ip_fw_sockopt.c 346205 2019-04-14 12:05:08Z ae $"); 32 33/* 34 * Control socket and rule management routines for ipfw. 35 * Control is currently implemented via IP_FW3 setsockopt() code. 36 */ 37 38#include "opt_ipfw.h" 39#include "opt_inet.h" 40#ifndef INET 41#error IPFIREWALL requires INET. 
42#endif /* INET */ 43#include "opt_inet6.h" 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/malloc.h> 48#include <sys/mbuf.h> /* struct m_tag used by nested headers */ 49#include <sys/kernel.h> 50#include <sys/lock.h> 51#include <sys/priv.h> 52#include <sys/proc.h> 53#include <sys/rwlock.h> 54#include <sys/rmlock.h> 55#include <sys/socket.h> 56#include <sys/socketvar.h> 57#include <sys/sysctl.h> 58#include <sys/syslog.h> 59#include <sys/fnv_hash.h> 60#include <net/if.h> 61#include <net/pfil.h> 62#include <net/route.h> 63#include <net/vnet.h> 64#include <vm/vm.h> 65#include <vm/vm_extern.h> 66 67#include <netinet/in.h> 68#include <netinet/ip_var.h> /* hooks */ 69#include <netinet/ip_fw.h> 70 71#include <netpfil/ipfw/ip_fw_private.h> 72#include <netpfil/ipfw/ip_fw_table.h> 73 74#ifdef MAC 75#include <security/mac/mac_framework.h> 76#endif 77 78static int ipfw_ctl(struct sockopt *sopt); 79static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, 80 struct rule_check_info *ci); 81static int check_ipfw_rule1(struct ip_fw_rule *rule, int size, 82 struct rule_check_info *ci); 83static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 84 struct rule_check_info *ci); 85static int rewrite_rule_uidx(struct ip_fw_chain *chain, 86 struct rule_check_info *ci); 87 88#define NAMEDOBJ_HASH_SIZE 32 89 90struct namedobj_instance { 91 struct namedobjects_head *names; 92 struct namedobjects_head *values; 93 uint32_t nn_size; /* names hash size */ 94 uint32_t nv_size; /* number hash size */ 95 u_long *idx_mask; /* used items bitmask */ 96 uint32_t max_blocks; /* number of "long" blocks in bitmask */ 97 uint32_t count; /* number of items */ 98 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */ 99 objhash_hash_f *hash_f; 100 objhash_cmp_f *cmp_f; 101}; 102#define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */ 103 104static uint32_t objhash_hash_name(struct namedobj_instance *ni, 105 const void *key, uint32_t kopt); 
106static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val); 107static int objhash_cmp_name(struct named_object *no, const void *name, 108 uint32_t set); 109 110MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); 111 112static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 113 struct sockopt_data *sd); 114static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 115 struct sockopt_data *sd); 116static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 117 struct sockopt_data *sd); 118static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 119 struct sockopt_data *sd); 120static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 121 struct sockopt_data *sd); 122static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 123 struct sockopt_data *sd); 124static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 125 struct sockopt_data *sd); 126static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 127 struct sockopt_data *sd); 128 129/* ctl3 handler data */ 130struct mtx ctl3_lock; 131#define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF) 132#define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock) 133#define CTL3_LOCK() mtx_lock(&ctl3_lock) 134#define CTL3_UNLOCK() mtx_unlock(&ctl3_lock) 135 136static struct ipfw_sopt_handler *ctl3_handlers; 137static size_t ctl3_hsize; 138static uint64_t ctl3_refct, ctl3_gencnt; 139#define CTL3_SMALLBUF 4096 /* small page-size write buffer */ 140#define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */ 141 142static int ipfw_flush_sopt_data(struct sockopt_data *sd); 143 144static struct ipfw_sopt_handler scodes[] = { 145 { IP_FW_XGET, 0, HDIR_GET, dump_config }, 146 { IP_FW_XADD, 0, HDIR_BOTH, add_rules }, 147 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules }, 148 { IP_FW_XZERO, 0, HDIR_SET, clear_rules }, 149 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules }, 150 { 
IP_FW_XMOVE, 0, HDIR_SET, move_rules }, 151 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets }, 152 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets }, 153 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets }, 154 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes }, 155 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects }, 156}; 157 158static int 159set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule); 160static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd, 161 uint16_t *puidx, uint8_t *ptype); 162static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 163 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti); 164static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, 165 struct tid_info *ti, struct obj_idx *pidx, int *unresolved); 166static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule); 167static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, 168 struct obj_idx *oib, struct obj_idx *end); 169static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 170 struct sockopt_data *sd); 171 172/* 173 * Opcode object rewriter variables 174 */ 175struct opcode_obj_rewrite *ctl3_rewriters; 176static size_t ctl3_rsize; 177 178/* 179 * static variables followed by global ones 180 */ 181 182static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone); 183#define V_ipfw_cntr_zone VNET(ipfw_cntr_zone) 184 185void 186ipfw_init_counters() 187{ 188 189 V_ipfw_cntr_zone = uma_zcreate("IPFW counters", 190 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL, 191 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 192} 193 194void 195ipfw_destroy_counters() 196{ 197 198 uma_zdestroy(V_ipfw_cntr_zone); 199} 200 201struct ip_fw * 202ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 203{ 204 struct ip_fw *rule; 205 206 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 207 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO); 208 rule->refcnt = 1; 209 210 return (rule); 211} 212 213void 
ipfw_free_rule(struct ip_fw *rule)
{

	/*
	 * We don't release refcnt here, since this function
	 * can be called without any locks held. The caller
	 * must release reference under IPFW_UH_WLOCK, and then
	 * call this function if refcount becomes 1.
	 */
	if (rule->refcnt > 1)
		return;
	uma_zfree(V_ipfw_cntr_zone, rule->cntr);
	free(rule, M_IPFW);
}


/*
 * Find the smallest rule >= key, id.
 * We could use bsearch but it is so simple that we code it directly
 *
 * Binary search of chain->map, which is kept sorted by (rulenum, id).
 * Returns the index of the first rule with (rulenum, id) >= (key, id);
 * the always-present default rule guarantees the result is a valid
 * index into the map.
 */
int
ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
{
	int i, lo, hi;
	struct ip_fw *r;

	for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
		i = (lo + hi) / 2;
		r = chain->map[i];
		if (r->rulenum < key)
			lo = i + 1;	/* continue from the next one */
		else if (r->rulenum > key)
			hi = i;		/* this might be good */
		else if (r->id < id)
			lo = i + 1;	/* continue from the next one */
		else /* r->id >= id */
			hi = i;		/* this might be good */
	}
	return hi;
}

/*
 * Builds skipto cache on rule set @map.
 *
 * The cache maps every possible rule number (0..65535) to the index
 * in @map of the first rule whose number is >= it, so O_SKIPTO can
 * jump in O(1).  The cache is built into chain->idxmap_back; a later
 * swap_skipto_cache() makes it current.
 */
static void
update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
{
	int *smap, rulenum;
	int i, mi;

	IPFW_UH_WLOCK_ASSERT(chain);

	mi = 0;
	rulenum = map[mi]->rulenum;
	smap = chain->idxmap_back;

	/* Cache not allocated yet (no skipto rule seen so far) */
	if (smap == NULL)
		return;

	for (i = 0; i < 65536; i++) {
		smap[i] = mi;
		/* Use the same rule index until i < rulenum */
		if (i != rulenum || i == 65535)
			continue;
		/* Find next rule with num > i */
		rulenum = map[++mi]->rulenum;
		while (rulenum == i)
			rulenum = map[++mi]->rulenum;
	}
}

/*
 * Swaps prepared (backup) index with current one.
287 */ 288static void 289swap_skipto_cache(struct ip_fw_chain *chain) 290{ 291 int *map; 292 293 IPFW_UH_WLOCK_ASSERT(chain); 294 IPFW_WLOCK_ASSERT(chain); 295 296 map = chain->idxmap; 297 chain->idxmap = chain->idxmap_back; 298 chain->idxmap_back = map; 299} 300 301/* 302 * Allocate and initialize skipto cache. 303 */ 304void 305ipfw_init_skipto_cache(struct ip_fw_chain *chain) 306{ 307 int *idxmap, *idxmap_back; 308 309 idxmap = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK | M_ZERO); 310 idxmap_back = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK); 311 312 /* 313 * Note we may be called at any time after initialization, 314 * for example, on first skipto rule, so we need to 315 * provide valid chain->idxmap on return 316 */ 317 318 IPFW_UH_WLOCK(chain); 319 if (chain->idxmap != NULL) { 320 IPFW_UH_WUNLOCK(chain); 321 free(idxmap, M_IPFW); 322 free(idxmap_back, M_IPFW); 323 return; 324 } 325 326 /* Set backup pointer first to permit building cache */ 327 chain->idxmap_back = idxmap_back; 328 update_skipto_cache(chain, chain->map); 329 IPFW_WLOCK(chain); 330 /* It is now safe to set chain->idxmap ptr */ 331 chain->idxmap = idxmap; 332 swap_skipto_cache(chain); 333 IPFW_WUNLOCK(chain); 334 IPFW_UH_WUNLOCK(chain); 335} 336 337/* 338 * Destroys skipto cache. 339 */ 340void 341ipfw_destroy_skipto_cache(struct ip_fw_chain *chain) 342{ 343 344 if (chain->idxmap != NULL) 345 free(chain->idxmap, M_IPFW); 346 if (chain->idxmap != NULL) 347 free(chain->idxmap_back, M_IPFW); 348} 349 350 351/* 352 * allocate a new map, returns the chain locked. extra is the number 353 * of entries to add or delete. 354 */ 355static struct ip_fw ** 356get_map(struct ip_fw_chain *chain, int extra, int locked) 357{ 358 359 for (;;) { 360 struct ip_fw **map; 361 int i, mflags; 362 363 mflags = M_ZERO | ((locked != 0) ? 
		    M_NOWAIT : M_WAITOK);

		i = chain->n_rules + extra;
		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
		if (map == NULL) {
			printf("%s: cannot allocate map\n", __FUNCTION__);
			return NULL;
		}
		if (!locked)
			IPFW_UH_WLOCK(chain);
		/*
		 * Re-check the size under the lock: n_rules may have
		 * grown while malloc() slept.
		 */
		if (i >= chain->n_rules + extra) /* good */
			return map;
		/* otherwise we lost the race, free and retry */
		if (!locked)
			IPFW_UH_WUNLOCK(chain);
		free(map, M_IPFW);
	}
}

/*
 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
 *
 * Returns the old map; the caller owns it and must free it.
 */
static struct ip_fw **
swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
{
	struct ip_fw **old_map;

	IPFW_WLOCK(chain);
	chain->id++;	/* bump ruleset generation number */
	chain->n_rules = new_len;
	old_map = chain->map;
	chain->map = new_map;
	swap_skipto_cache(chain);
	IPFW_WUNLOCK(chain);
	return old_map;
}


/*
 * Exports packet/byte counters and last-match timestamp of @krule
 * into the v1 (current) userland counter structure @cntr.
 * Internal timestamps are relative to boot time; convert to wall
 * clock for userland.
 */
static void
export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
{
	struct timeval boottime;

	cntr->size = sizeof(*cntr);

	if (krule->cntr != NULL) {
		cntr->pcnt = counter_u64_fetch(krule->cntr);
		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
		cntr->timestamp = krule->timestamp;
	}
	if (cntr->timestamp > 0) {
		getboottime(&boottime);
		cntr->timestamp += boottime.tv_sec;
	}
}

/*
 * Same as export_cntr1_base() but for the legacy (v0) counter layout.
 * NOTE(review): assumes @cntr was zeroed by the caller — when
 * krule->cntr is NULL, cntr->timestamp is read uninitialized
 * otherwise (export_rule0() memsets the whole destination first).
 */
static void
export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
{
	struct timeval boottime;

	if (krule->cntr != NULL) {
		cntr->pcnt = counter_u64_fetch(krule->cntr);
		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
		cntr->timestamp = krule->timestamp;
	}
	if (cntr->timestamp > 0) {
		getboottime(&boottime);
		cntr->timestamp += boottime.tv_sec;
	}
}

/*
 * Copies rule @urule from v1 userland format (current).
 * to kernel @krule.
 * Assume @krule is zeroed.
 */
static void
import_rule1(struct rule_check_info *ci)
{
	struct ip_fw_rule *urule;
	struct ip_fw *krule;

	urule = (struct ip_fw_rule *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	krule->flags = urule->flags;

	/* Save rulenum offset (used to report auto-assigned numbers back) */
	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);

	/* Copy opcodes (cmd_len is in 32-bit words) */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
}

/*
 * Export rule into v1 format (Current).
 * Layout:
 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
 *   [ ip_fw_rule ] OR
 *   [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
 * ]
 * Assume @data is zeroed.
 */
static void
export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
{
	struct ip_fw_bcounter *cntr;
	struct ip_fw_rule *urule;
	ipfw_obj_tlv *tlv;

	/* Fill in TLV header */
	tlv = (ipfw_obj_tlv *)data;
	tlv->type = IPFW_TLV_RULE_ENT;
	tlv->length = len;

	if (rcntrs != 0) {
		/* Copy counters */
		cntr = (struct ip_fw_bcounter *)(tlv + 1);
		urule = (struct ip_fw_rule *)(cntr + 1);
		export_cntr1_base(krule, cntr);
	} else
		urule = (struct ip_fw_rule *)(tlv + 1);

	/* copy header */
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	urule->flags = krule->flags;
	urule->id = krule->id;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
}


/*
 * Copies rule @urule from FreeBSD8 userland format (v0)
 * to kernel @krule.
 * Assume @krule is zeroed.
509 */ 510static void 511import_rule0(struct rule_check_info *ci) 512{ 513 struct ip_fw_rule0 *urule; 514 struct ip_fw *krule; 515 int cmdlen, l; 516 ipfw_insn *cmd; 517 ipfw_insn_limit *lcmd; 518 ipfw_insn_if *cmdif; 519 520 urule = (struct ip_fw_rule0 *)ci->urule; 521 krule = (struct ip_fw *)ci->krule; 522 523 /* copy header */ 524 krule->act_ofs = urule->act_ofs; 525 krule->cmd_len = urule->cmd_len; 526 krule->rulenum = urule->rulenum; 527 krule->set = urule->set; 528 if ((urule->_pad & 1) != 0) 529 krule->flags |= IPFW_RULE_NOOPT; 530 531 /* Save rulenum offset */ 532 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum); 533 534 /* Copy opcodes */ 535 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t)); 536 537 /* 538 * Alter opcodes: 539 * 1) convert tablearg value from 65535 to 0 540 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room 541 * for targ). 542 * 3) convert table number in iface opcodes to u16 543 * 4) convert old `nat global` into new 65535 544 */ 545 l = krule->cmd_len; 546 cmd = krule->cmd; 547 cmdlen = 0; 548 549 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 550 cmdlen = F_LEN(cmd); 551 552 switch (cmd->opcode) { 553 /* Opcodes supporting tablearg */ 554 case O_TAG: 555 case O_TAGGED: 556 case O_PIPE: 557 case O_QUEUE: 558 case O_DIVERT: 559 case O_TEE: 560 case O_SKIPTO: 561 case O_CALLRETURN: 562 case O_NETGRAPH: 563 case O_NGTEE: 564 case O_NAT: 565 if (cmd->arg1 == IP_FW_TABLEARG) 566 cmd->arg1 = IP_FW_TARG; 567 else if (cmd->arg1 == 0) 568 cmd->arg1 = IP_FW_NAT44_GLOBAL; 569 break; 570 case O_SETFIB: 571 case O_SETDSCP: 572 if (cmd->arg1 == IP_FW_TABLEARG) 573 cmd->arg1 = IP_FW_TARG; 574 else 575 cmd->arg1 |= 0x8000; 576 break; 577 case O_LIMIT: 578 lcmd = (ipfw_insn_limit *)cmd; 579 if (lcmd->conn_limit == IP_FW_TABLEARG) 580 lcmd->conn_limit = IP_FW_TARG; 581 break; 582 /* Interface tables */ 583 case O_XMIT: 584 case O_RECV: 585 case O_VIA: 586 /* Interface table, possibly */ 587 cmdif = (ipfw_insn_if *)cmd; 
588 if (cmdif->name[0] != '\1') 589 break; 590 591 cmdif->p.kidx = (uint16_t)cmdif->p.glob; 592 break; 593 } 594 } 595} 596 597/* 598 * Copies rule @krule from kernel to FreeBSD8 userland format (v0) 599 */ 600static void 601export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len) 602{ 603 int cmdlen, l; 604 ipfw_insn *cmd; 605 ipfw_insn_limit *lcmd; 606 ipfw_insn_if *cmdif; 607 608 /* copy header */ 609 memset(urule, 0, len); 610 urule->act_ofs = krule->act_ofs; 611 urule->cmd_len = krule->cmd_len; 612 urule->rulenum = krule->rulenum; 613 urule->set = krule->set; 614 if ((krule->flags & IPFW_RULE_NOOPT) != 0) 615 urule->_pad |= 1; 616 617 /* Copy opcodes */ 618 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t)); 619 620 /* Export counters */ 621 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt); 622 623 /* 624 * Alter opcodes: 625 * 1) convert tablearg value from 0 to 65535 626 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values. 627 * 3) convert table number in iface opcodes to int 628 */ 629 l = urule->cmd_len; 630 cmd = urule->cmd; 631 cmdlen = 0; 632 633 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 634 cmdlen = F_LEN(cmd); 635 636 switch (cmd->opcode) { 637 /* Opcodes supporting tablearg */ 638 case O_TAG: 639 case O_TAGGED: 640 case O_PIPE: 641 case O_QUEUE: 642 case O_DIVERT: 643 case O_TEE: 644 case O_SKIPTO: 645 case O_CALLRETURN: 646 case O_NETGRAPH: 647 case O_NGTEE: 648 case O_NAT: 649 if (cmd->arg1 == IP_FW_TARG) 650 cmd->arg1 = IP_FW_TABLEARG; 651 else if (cmd->arg1 == IP_FW_NAT44_GLOBAL) 652 cmd->arg1 = 0; 653 break; 654 case O_SETFIB: 655 case O_SETDSCP: 656 if (cmd->arg1 == IP_FW_TARG) 657 cmd->arg1 = IP_FW_TABLEARG; 658 else 659 cmd->arg1 &= ~0x8000; 660 break; 661 case O_LIMIT: 662 lcmd = (ipfw_insn_limit *)cmd; 663 if (lcmd->conn_limit == IP_FW_TARG) 664 lcmd->conn_limit = IP_FW_TABLEARG; 665 break; 666 /* Interface tables */ 667 case O_XMIT: 668 case O_RECV: 669 case O_VIA: 670 /* Interface table, 
possibly */ 671 cmdif = (ipfw_insn_if *)cmd; 672 if (cmdif->name[0] != '\1') 673 break; 674 675 cmdif->p.glob = cmdif->p.kidx; 676 break; 677 } 678 } 679} 680 681/* 682 * Add new rule(s) to the list possibly creating rule number for each. 683 * Update the rule_number in the input struct so the caller knows it as well. 684 * Must be called without IPFW_UH held 685 */ 686static int 687commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count) 688{ 689 int error, i, insert_before, tcount; 690 uint16_t rulenum, *pnum; 691 struct rule_check_info *ci; 692 struct ip_fw *krule; 693 struct ip_fw **map; /* the new array of pointers */ 694 695 /* Check if we need to do table/obj index remap */ 696 tcount = 0; 697 for (ci = rci, i = 0; i < count; ci++, i++) { 698 if (ci->object_opcodes == 0) 699 continue; 700 701 /* 702 * Rule has some object opcodes. 703 * We need to find (and create non-existing) 704 * kernel objects, and reference existing ones. 705 */ 706 error = rewrite_rule_uidx(chain, ci); 707 if (error != 0) { 708 709 /* 710 * rewrite failed, state for current rule 711 * has been reverted. Check if we need to 712 * revert more. 713 */ 714 if (tcount > 0) { 715 716 /* 717 * We have some more table rules 718 * we need to rollback. 
719 */ 720 721 IPFW_UH_WLOCK(chain); 722 while (ci != rci) { 723 ci--; 724 if (ci->object_opcodes == 0) 725 continue; 726 unref_rule_objects(chain,ci->krule); 727 728 } 729 IPFW_UH_WUNLOCK(chain); 730 731 } 732 733 return (error); 734 } 735 736 tcount++; 737 } 738 739 /* get_map returns with IPFW_UH_WLOCK if successful */ 740 map = get_map(chain, count, 0 /* not locked */); 741 if (map == NULL) { 742 if (tcount > 0) { 743 /* Unbind tables */ 744 IPFW_UH_WLOCK(chain); 745 for (ci = rci, i = 0; i < count; ci++, i++) { 746 if (ci->object_opcodes == 0) 747 continue; 748 749 unref_rule_objects(chain, ci->krule); 750 } 751 IPFW_UH_WUNLOCK(chain); 752 } 753 754 return (ENOSPC); 755 } 756 757 if (V_autoinc_step < 1) 758 V_autoinc_step = 1; 759 else if (V_autoinc_step > 1000) 760 V_autoinc_step = 1000; 761 762 /* FIXME: Handle count > 1 */ 763 ci = rci; 764 krule = ci->krule; 765 rulenum = krule->rulenum; 766 767 /* find the insertion point, we will insert before */ 768 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE; 769 i = ipfw_find_rule(chain, insert_before, 0); 770 /* duplicate first part */ 771 if (i > 0) 772 bcopy(chain->map, map, i * sizeof(struct ip_fw *)); 773 map[i] = krule; 774 /* duplicate remaining part, we always have the default rule */ 775 bcopy(chain->map + i, map + i + 1, 776 sizeof(struct ip_fw *) *(chain->n_rules - i)); 777 if (rulenum == 0) { 778 /* Compute rule number and write it back */ 779 rulenum = i > 0 ? 
map[i-1]->rulenum : 0; 780 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step) 781 rulenum += V_autoinc_step; 782 krule->rulenum = rulenum; 783 /* Save number to userland rule */ 784 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff); 785 *pnum = rulenum; 786 } 787 788 krule->id = chain->id + 1; 789 update_skipto_cache(chain, map); 790 map = swap_map(chain, map, chain->n_rules + 1); 791 chain->static_len += RULEUSIZE0(krule); 792 IPFW_UH_WUNLOCK(chain); 793 if (map) 794 free(map, M_IPFW); 795 return (0); 796} 797 798int 799ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule, 800 int locked) 801{ 802 struct ip_fw **map; 803 804 map = get_map(chain, 1, locked); 805 if (map == NULL) 806 return (ENOMEM); 807 if (chain->n_rules > 0) 808 bcopy(chain->map, map, 809 chain->n_rules * sizeof(struct ip_fw *)); 810 map[chain->n_rules] = rule; 811 rule->rulenum = IPFW_DEFAULT_RULE; 812 rule->set = RESVD_SET; 813 rule->id = chain->id + 1; 814 /* We add rule in the end of chain, no need to update skipto cache */ 815 map = swap_map(chain, map, chain->n_rules + 1); 816 chain->static_len += RULEUSIZE0(rule); 817 IPFW_UH_WUNLOCK(chain); 818 free(map, M_IPFW); 819 return (0); 820} 821 822/* 823 * Adds @rule to the list of rules to reap 824 */ 825void 826ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head, 827 struct ip_fw *rule) 828{ 829 830 IPFW_UH_WLOCK_ASSERT(chain); 831 832 /* Unlink rule from everywhere */ 833 unref_rule_objects(chain, rule); 834 835 rule->next = *head; 836 *head = rule; 837} 838 839/* 840 * Reclaim storage associated with a list of rules. This is 841 * typically the list created using remove_rule. 842 * A NULL pointer on input is handled correctly. 
843 */ 844void 845ipfw_reap_rules(struct ip_fw *head) 846{ 847 struct ip_fw *rule; 848 849 while ((rule = head) != NULL) { 850 head = head->next; 851 ipfw_free_rule(rule); 852 } 853} 854 855/* 856 * Rules to keep are 857 * (default || reserved || !match_set || !match_number) 858 * where 859 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE) 860 * // the default rule is always protected 861 * 862 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET) 863 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush") 864 * 865 * match_set ::= (cmd == 0 || rule->set == set) 866 * // set number is ignored for cmd == 0 867 * 868 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum) 869 * // number is ignored for cmd == 1 or n == 0 870 * 871 */ 872int 873ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt) 874{ 875 876 /* Don't match default rule for modification queries */ 877 if (rule->rulenum == IPFW_DEFAULT_RULE && 878 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0) 879 return (0); 880 881 /* Don't match rules in reserved set for flush requests */ 882 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET) 883 return (0); 884 885 /* If we're filtering by set, don't match other sets */ 886 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set) 887 return (0); 888 889 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 && 890 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule)) 891 return (0); 892 893 return (1); 894} 895 896struct manage_sets_args { 897 uint16_t set; 898 uint8_t new_set; 899}; 900 901static int 902swap_sets_cb(struct namedobj_instance *ni, struct named_object *no, 903 void *arg) 904{ 905 struct manage_sets_args *args; 906 907 args = (struct manage_sets_args *)arg; 908 if (no->set == (uint8_t)args->set) 909 no->set = args->new_set; 910 else if (no->set == args->new_set) 911 no->set = (uint8_t)args->set; 912 return (0); 913} 914 915static int 916move_sets_cb(struct namedobj_instance *ni, struct named_object *no, 
    void *arg)
{
	struct manage_sets_args *args;

	args = (struct manage_sets_args *)arg;
	/* Move every object from the old set into the new one */
	if (no->set == (uint8_t)args->set)
		no->set = args->new_set;
	return (0);
}

/*
 * Callback for TEST_ALL: verifies that moving @no into args->new_set
 * would not clash with an existing name in the destination set.
 */
static int
test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
    void *arg)
{
	struct manage_sets_args *args;

	args = (struct manage_sets_args *)arg;
	if (no->set != (uint8_t)args->set)
		return (0);
	if (ipfw_objhash_lookup_name_type(ni, args->new_set,
	    no->etlv, no->name) != NULL)
		return (EEXIST);
	return (0);
}

/*
 * Generic function to handle moving and swapping sets.
 *
 * For the *_ALL commands @set/@new_set are set numbers; for the
 * *_ONE commands @set carries a kernel object index (kidx) instead.
 */
int
ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
    uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
{
	struct manage_sets_args args;
	struct named_object *no;

	args.set = set;
	args.new_set = new_set;
	switch (cmd) {
	case SWAP_ALL:
		return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
		    &args, type));
	case TEST_ALL:
		return (ipfw_objhash_foreach_type(ni, test_sets_cb,
		    &args, type));
	case MOVE_ALL:
		return (ipfw_objhash_foreach_type(ni, move_sets_cb,
		    &args, type));
	case COUNT_ONE:
		/*
		 * @set used to pass kidx.
		 * When @new_set is zero - reset object counter,
		 * otherwise increment it.
		 */
		/* NOTE(review): assumes @set names an existing object
		 * — lookup result is not NULL-checked; verify callers. */
		no = ipfw_objhash_lookup_kidx(ni, set);
		if (new_set != 0)
			no->ocnt++;
		else
			no->ocnt = 0;
		return (0);
	case TEST_ONE:
		/* @set used to pass kidx */
		no = ipfw_objhash_lookup_kidx(ni, set);
		/*
		 * First check number of references:
		 * when it differs, this mean other rules are holding
		 * reference to given object, so it is not possible to
		 * change its set. Note that refcnt may account references
		 * to some going-to-be-added rules. Since we don't know
		 * their numbers (and even if they will be added) it is
		 * perfectly OK to return error here.
		 */
		if (no->ocnt != no->refcnt)
			return (EBUSY);
		if (ipfw_objhash_lookup_name_type(ni, new_set, type,
		    no->name) != NULL)
			return (EEXIST);
		return (0);
	case MOVE_ONE:
		/* @set used to pass kidx */
		no = ipfw_objhash_lookup_kidx(ni, set);
		no->set = new_set;
		return (0);
	}
	return (EINVAL);
}

/*
 * Delete rules matching range @rt.
 * Saves number of deleted rules in @ndel.
 *
 * Returns 0 on success.
 */
static int
delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
{
	struct ip_fw *reap, *rule, **map;
	int end, start;
	int i, n, ndyn, ofs;

	reap = NULL;
	IPFW_UH_WLOCK(chain);	/* arbitrate writers */

	/*
	 * Stage 1: Determine range to inspect.
	 * Range is half-inclusive, e.g [start, end).
	 */
	start = 0;
	end = chain->n_rules - 1;

	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
		start = ipfw_find_rule(chain, rt->start_rule, 0);

		/* Never delete the default rule via a range request */
		if (rt->end_rule >= IPFW_DEFAULT_RULE)
			rt->end_rule = IPFW_DEFAULT_RULE - 1;
		end = ipfw_find_rule(chain, rt->end_rule, UINT32_MAX);
	}

	if (rt->flags & IPFW_RCFLAG_DYNAMIC) {
		/*
		 * Requested deleting only for dynamic states.
		 */
		*ndel = 0;
		ipfw_expire_dyn_states(chain, rt);
		IPFW_UH_WUNLOCK(chain);
		return (0);
	}

	/* Allocate new map of the same size */
	map = get_map(chain, 0, 1 /* locked */);
	if (map == NULL) {
		IPFW_UH_WUNLOCK(chain);
		return (ENOMEM);
	}

	n = 0;
	ndyn = 0;
	ofs = start;
	/* 1. bcopy the initial part of the map */
	if (start > 0)
		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
	/* 2.
	    copy active rules between start and end */
	for (i = start; i < end; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0) {
			/* Rule survives: keep it in the new map */
			map[ofs++] = rule;
			continue;
		}

		n++;
		if (ipfw_is_dyn_rule(rule) != 0)
			ndyn++;
	}
	/* 3. copy the final part of the map */
	bcopy(chain->map + end, map + ofs,
	    (chain->n_rules - end) * sizeof(struct ip_fw *));
	/* 4. recalculate skipto cache */
	update_skipto_cache(chain, map);
	/* 5. swap the maps (under UH_WLOCK + WHLOCK) */
	map = swap_map(chain, map, chain->n_rules - n);
	/* 6. Remove all dynamic states originated by deleted rules */
	if (ndyn > 0)
		ipfw_expire_dyn_states(chain, rt);
	/* 7. now remove the rules deleted from the old map */
	for (i = start; i < end; i++) {
		rule = map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		chain->static_len -= RULEUSIZE0(rule);
		ipfw_reap_add(chain, &reap, rule);
	}
	IPFW_UH_WUNLOCK(chain);

	/* Reclaim rule storage outside of the locks */
	ipfw_reap_rules(reap);
	if (map != NULL)
		free(map, M_IPFW);
	*ndel = n;
	return (0);
}

/*
 * Verifies that every named object (e.g. table) referenced by the
 * rules in range @rt is referenced by that range only, and if so
 * moves the objects to set rt->new_set along with the rules.
 *
 * Returns 0 on success, non-zero (EBUSY/EEXIST) if some object
 * cannot be moved.
 */
static int
move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
{
	struct opcode_obj_rewrite *rw;
	struct ip_fw *rule;
	ipfw_insn *cmd;
	int cmdlen, i, l, c;
	uint16_t kidx;

	IPFW_UH_WLOCK_ASSERT(ch);

	/* Stage 1: count number of references by given rules */
	for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/*
			 * When manage_sets() returns non-zero value to
			 * COUNT_ONE command, consider this as an object
			 * doesn't support sets (e.g. disabled with sysctl).
			 * So, skip checks for this object.
			 */
			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
				continue;
			c++;
		}
	}
	if (c == 0) /* No objects found */
		return (0);
	/* Stage 2: verify "ownership" */
	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* Test for ownership and conflicting names */
			c = rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, TEST_ONE);
		}
	}
	/* Stage 3: change set and cleanup */
	for (i = 0; i < ch->n_rules - 1; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* cleanup object counter */
			rw->manage_sets(ch, kidx,
			    0 /* reset counter */, COUNT_ONE);
			if (c != 0)
				continue;
			/* change set */
			rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, MOVE_ONE);
		}
	}
	return (c);
}

/*
 * Changes set of rules matching range @rt
 * to the new set.
 *
 * Returns 0 on success.
 */
static int
move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK(chain);

	/*
	 * Move rules with matching parameters to a new set.
	 * This one is much more complex. We have to ensure
	 * that all referenced tables (if any) are referenced
	 * by given rule subset only. Otherwise, we can't move
	 * them to new set and have to return error.
	 */
	if ((i = move_objects(chain, rt)) != 0) {
		IPFW_UH_WUNLOCK(chain);
		return (i);
	}

	/* XXX: We have to do swap holding WLOCK */
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		rule->set = rt->new_set;
	}

	IPFW_UH_WUNLOCK(chain);

	return (0);
}

/*
 * Clear counters for a specific rule.
 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
 * so we only care that rules do not disappear.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0)
		IPFW_ZERO_RULE_COUNTER(rule);
	/* Refill the logging budget if the action carries an O_LOG. */
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
}

/*
 * Flushes rules counters and/or log values on matching range.
 *
 * Returns number of items cleared.
 */
static int
clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
{
	struct ip_fw *rule;
	int num;
	int i;

	num = 0;
	rt->flags |= IPFW_RCFLAG_DEFAULT;

	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		clear_counters(rule, log_only);
		num++;
	}
	IPFW_UH_WUNLOCK(chain);

	return (num);
}

/*
 * Validates a range request TLV: header length, rule ordering,
 * set bounds and permitted flags.
 *
 * Returns 0 if @rt is valid, non-zero otherwise.
 */
static int
check_range_tlv(ipfw_range_tlv *rt)
{

	if (rt->head.length != sizeof(*rt))
		return (1);
	if (rt->start_rule > rt->end_rule)
		return (1);
	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
		return (1);

	/* Only user-settable flag bits may be present. */
	if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
		return (1);

	return (0);
}

/*
 * Delete rules matching specified parameters
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Saves number of deleted rules in ipfw_range_tlv->new_set.
 *
 * Returns 0 on success.
 */
static int
del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;
	int error, ndel;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (check_range_tlv(&rh->range) != 0)
		return (EINVAL);

	ndel = 0;
	if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
		return (error);

	/* Save number of rules deleted */
	rh->range.new_set = ndel;
	return (0);
}

/*
 * Move rules/sets matching specified parameters
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Returns 0 on success.
 */
static int
move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (check_range_tlv(&rh->range) != 0)
		return (EINVAL);

	return (move_range(chain, &rh->range));
}

/*
 * Clear rule accounting data matching specified parameters
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Saves number of cleared rules in ipfw_range_tlv->new_set.
 *
 * Returns 0 on success.
 */
static int
clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;
	int log_only, num;
	char *msg;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (check_range_tlv(&rh->range) != 0)
		return (EINVAL);

	/* IP_FW_XRESETLOG resets only log budgets, not counters. */
	log_only = (op3->opcode == IP_FW_XRESETLOG);

	num = clear_range(chain, &rh->range, log_only);

	if (rh->range.flags & IPFW_RCFLAG_ALL)
		msg = log_only ? "All logging counts reset" :
		    "Accounting cleared";
	else
		msg = log_only ? "logging count reset" : "cleared";

	if (V_fw_verbose) {
		int lev = LOG_SECURITY | LOG_NOTICE;
		log(lev, "ipfw: %s.\n", msg);
	}

	/* Save number of rules cleared */
	rh->range.new_set = num;
	return (0);
}

/*
 * Applies the set enable/disable bitmasks carried in @rt:
 * bits in rt->set disable the corresponding sets, bits in
 * rt->new_set enable them; RESVD_SET is always kept enabled.
 */
static void
enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	uint32_t v_set;

	IPFW_UH_WLOCK_ASSERT(chain);

	/* Change enabled/disabled sets mask */
	v_set = (V_set_disable | rt->set) & ~rt->new_set;
	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
	IPFW_WLOCK(chain);
	V_set_disable = v_set;
	IPFW_WUNLOCK(chain);
}

/*
 * Swaps rules between sets rt->set and rt->new_set, or (when @mv is
 * non-zero) moves rules and named objects from rt->set to rt->new_set.
 *
 * Returns 0 on success, EEXIST if a move would create conflicting
 * named objects in the destination set.
 */
static int
swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
{
	struct opcode_obj_rewrite *rw;
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK_ASSERT(chain);

	if (rt->set == rt->new_set) /* nothing to do */
		return (0);

	if (mv != 0) {
		/*
		 * Before moving the rules we need to check that
		 * there aren't any conflicting named objects.
		 */
		for (rw = ctl3_rewriters;
		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
			if (rw->manage_sets == NULL)
				continue;
			i = rw->manage_sets(chain, (uint8_t)rt->set,
			    (uint8_t)rt->new_set, TEST_ALL);
			if (i != 0)
				return (EEXIST);
		}
	}
	/* Swap or move two sets */
	for (i = 0; i < chain->n_rules - 1; i++) {
		rule = chain->map[i];
		if (rule->set == (uint8_t)rt->set)
			rule->set = (uint8_t)rt->new_set;
		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
			rule->set = (uint8_t)rt->set;
	}
	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
		if (rw->manage_sets == NULL)
			continue;
		rw->manage_sets(chain, (uint8_t)rt->set,
		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
	}
	return (0);
}

/*
 * Swaps or moves set
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Returns 0 on success.
 */
static int
manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;
	int ret;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (rh->range.head.length != sizeof(ipfw_range_tlv))
		return (1);
	/* enable_sets() expects bitmasks. */
	if (op3->opcode != IP_FW_SET_ENABLE &&
	    (rh->range.set >= IPFW_MAX_SETS ||
	    rh->range.new_set >= IPFW_MAX_SETS))
		return (EINVAL);

	ret = 0;
	IPFW_UH_WLOCK(chain);
	switch (op3->opcode) {
	case IP_FW_SET_SWAP:
	case IP_FW_SET_MOVE:
		ret = swap_sets(chain, &rh->range,
		    op3->opcode == IP_FW_SET_MOVE);
		break;
	case IP_FW_SET_ENABLE:
		enable_sets(chain, &rh->range);
		break;
	}
	IPFW_UH_WUNLOCK(chain);

	return (ret);
}

/**
 * Remove all rules with given number, or do set manipulation.
 * Assumes chain != NULL && *chain != NULL.
 *
 * The argument is an uint32_t.
 The low 16 bit are the rule or set number;
 * the next 8 bits are the new set; the top 8 bits indicate the command:
 *
 *	0	delete rules numbered "rulenum"
 *	1	delete rules in set "rulenum"
 *	2	move rules "rulenum" to set "new_set"
 *	3	move rules from set "rulenum" to set "new_set"
 *	4	swap sets "rulenum" and "new_set"
 *	5	delete rules "rulenum" and set "new_set"
 */
static int
del_entry(struct ip_fw_chain *chain, uint32_t arg)
{
	uint32_t num;	/* rule number or old_set */
	uint8_t cmd, new_set;
	int do_del, ndel;
	int error = 0;
	ipfw_range_tlv rt;

	/* Unpack the legacy packed argument (see layout above). */
	num = arg & 0xffff;
	cmd = (arg >> 24) & 0xff;
	new_set = (arg >> 16) & 0xff;

	if (cmd > 5 || new_set > RESVD_SET)
		return EINVAL;
	if (cmd == 0 || cmd == 2 || cmd == 5) {
		if (num >= IPFW_DEFAULT_RULE)
			return EINVAL;
	} else {
		if (num > RESVD_SET)	/* old_set */
			return EINVAL;
	}

	/* Convert old requests into new representation */
	memset(&rt, 0, sizeof(rt));
	rt.start_rule = num;
	rt.end_rule = num;
	rt.set = num;
	rt.new_set = new_set;
	do_del = 0;

	switch (cmd) {
	case 0:	/* delete rules numbered "rulenum" */
		if (num == 0)
			rt.flags |= IPFW_RCFLAG_ALL;
		else
			rt.flags |= IPFW_RCFLAG_RANGE;
		do_del = 1;
		break;
	case 1:	/* delete rules in set "rulenum" */
		rt.flags |= IPFW_RCFLAG_SET;
		do_del = 1;
		break;
	case 5: /* delete rules "rulenum" and set "new_set" */
		rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
		rt.set = new_set;
		rt.new_set = 0;
		do_del = 1;
		break;
	case 2: /* move rules "rulenum" to set "new_set" */
		rt.flags |= IPFW_RCFLAG_RANGE;
		break;
	case 3: /* move rules from set "rulenum" to set "new_set" */
		IPFW_UH_WLOCK(chain);
		error = swap_sets(chain, &rt, 1);
		IPFW_UH_WUNLOCK(chain);
		return (error);
	case 4: /* swap sets "rulenum" and "new_set" */
		IPFW_UH_WLOCK(chain);
		error = swap_sets(chain, &rt, 0);
		IPFW_UH_WUNLOCK(chain);
		return (error);
	default:
		return (ENOTSUP);
	}

	if (do_del != 0) {
		if ((error = delete_range(chain, &rt, &ndel)) != 0)
			return (error);

		/* Deleting a specific rule that does not exist is an error. */
		if (ndel == 0 && (cmd != 1 && num != 0))
			return (EINVAL);

		return (0);
	}

	return (move_range(chain, &rt));
}

/**
 * Reset some or all counters on firewall rules.
 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number,
 * the next 8 bits are the set number, the top 8 bits are the command:
 *	0	work with rules from all set's;
 *	1	work with rules only from specified set.
 * Specified rule number is zero if we want to clear all entries.
 * log_only is 1 if we only want to reset logs, zero otherwise.
 */
static int
zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
{
	struct ip_fw *rule;
	char *msg;
	int i;

	uint16_t rulenum = arg & 0xffff;
	uint8_t set = (arg >> 16) & 0xff;
	uint8_t cmd = (arg >> 24) & 0xff;

	if (cmd > 1)
		return (EINVAL);
	if (cmd == 1 && set > RESVD_SET)
		return (EINVAL);

	IPFW_UH_RLOCK(chain);
	if (rulenum == 0) {
		/* Clear everything (optionally restricted to one set). */
		V_norule_counter = 0;
		for (i = 0; i < chain->n_rules; i++) {
			rule = chain->map[i];
			/* Skip rules not in our set. */
			if (cmd == 1 && rule->set != set)
				continue;
			clear_counters(rule, log_only);
		}
		msg = log_only ? "All logging counts reset" :
		    "Accounting cleared";
	} else {
		int cleared = 0;
		/* Map is sorted by rulenum, so we can stop early. */
		for (i = 0; i < chain->n_rules; i++) {
			rule = chain->map[i];
			if (rule->rulenum == rulenum) {
				if (cmd == 0 || rule->set == set)
					clear_counters(rule, log_only);
				cleared = 1;
			}
			if (rule->rulenum > rulenum)
				break;
		}
		if (!cleared) {	/* we did not find any matching rules */
			IPFW_UH_RUNLOCK(chain);
			return (EINVAL);
		}
		msg = log_only ? "logging count reset" : "cleared";
	}
	IPFW_UH_RUNLOCK(chain);

	if (V_fw_verbose) {
		int lev = LOG_SECURITY | LOG_NOTICE;

		if (rulenum)
			log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
		else
			log(lev, "ipfw: %s.\n", msg);
	}
	return (0);
}


/*
 * Check rule head in FreeBSD11 format
 *
 */
static int
check_ipfw_rule1(struct ip_fw_rule *rule, int size,
    struct rule_check_info *ci)
{
	int l;

	if (size < sizeof(*rule)) {
		printf("ipfw: rule too short\n");
		return (EINVAL);
	}

	/* Check for valid cmd_len */
	l = roundup2(RULESIZE(rule), sizeof(uint64_t));
	if (l != size) {
		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
		return (EINVAL);
	}
	/* Action offset must point inside the opcode stream. */
	if (rule->act_ofs >= rule->cmd_len) {
		printf("ipfw: bogus action offset (%u > %u)\n",
		    rule->act_ofs, rule->cmd_len - 1);
		return (EINVAL);
	}

	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
		return (EINVAL);

	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
}

/*
 * Check rule head in FreeBSD8 format
 *
 */
static int
check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
    struct rule_check_info *ci)
{
	int l;

	if (size < sizeof(*rule)) {
		printf("ipfw: rule too short\n");
		return (EINVAL);
	}

	/* Check for valid cmd_len */
	l = sizeof(*rule) + rule->cmd_len * 4
	    - 4;
	if (l != size) {
		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
		return (EINVAL);
	}
	/* Action offset must point inside the opcode stream. */
	if (rule->act_ofs >= rule->cmd_len) {
		printf("ipfw: bogus action offset (%u > %u)\n",
		    rule->act_ofs, rule->cmd_len - 1);
		return (EINVAL);
	}

	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
		return (EINVAL);

	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
}

/*
 * Walks the rule opcode stream validating per-opcode sizes and
 * argument ranges, and requiring exactly one action opcode placed
 * last.  Opcodes that reference named objects (tables, states,
 * interfaces, external actions) bump @ci->object_opcodes.
 *
 * Returns 0 if the body is valid, EINVAL/EPROTONOSUPPORT otherwise.
 */
static int
check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
{
	int cmdlen, l;
	int have_action;

	have_action = 0;

	/*
	 * Now go for the individual checks. Very simple ones, basically only
	 * instruction sizes.
	 */
	for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		if (cmdlen > l) {
			printf("ipfw: opcode %d size truncated\n",
			    cmd->opcode);
			return EINVAL;
		}
		switch (cmd->opcode) {
		case O_PROBE_STATE:
		case O_KEEP_STATE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			ci->object_opcodes++;
			break;
		case O_PROTO:
		case O_IP_SRC_ME:
		case O_IP_DST_ME:
		case O_LAYER2:
		case O_IN:
		case O_FRAG:
		case O_DIVERTED:
		case O_IPOPT:
		case O_IPTOS:
		case O_IPPRECEDENCE:
		case O_IPVER:
		case O_SOCKARG:
		case O_TCPFLAGS:
		case O_TCPOPTS:
		case O_ESTAB:
		case O_VERREVPATH:
		case O_VERSRCREACH:
		case O_ANTISPOOF:
		case O_IPSEC:
#ifdef INET6
		case O_IP6_SRC_ME:
		case O_IP6_DST_ME:
		case O_EXT_HDR:
		case O_IP6:
#endif
		case O_IP4:
		case O_TAG:
		case O_SKIP_ACTION:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			break;

		case O_EXTERNAL_ACTION:
			if (cmd->arg1 == 0 ||
			    cmdlen != F_INSN_SIZE(ipfw_insn)) {
				printf("ipfw: invalid external "
				    "action opcode\n");
				return (EINVAL);
			}
			ci->object_opcodes++;
			/*
			 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
			 * opcode?
			 */
			if (l != cmdlen) {
				l -= cmdlen;
				cmd += cmdlen;
				cmdlen = F_LEN(cmd);
				if (cmd->opcode == O_EXTERNAL_DATA)
					goto check_action;
				if (cmd->opcode != O_EXTERNAL_INSTANCE) {
					printf("ipfw: invalid opcode "
					    "next to external action %u\n",
					    cmd->opcode);
					return (EINVAL);
				}
				if (cmd->arg1 == 0 ||
				    cmdlen != F_INSN_SIZE(ipfw_insn)) {
					printf("ipfw: invalid external "
					    "action instance opcode\n");
					return (EINVAL);
				}
				ci->object_opcodes++;
			}
			goto check_action;

		case O_FIB:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			if (cmd->arg1 >= rt_numfibs) {
				printf("ipfw: invalid fib number %d\n",
				    cmd->arg1);
				return EINVAL;
			}
			break;

		case O_SETFIB:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			if ((cmd->arg1 != IP_FW_TARG) &&
			    ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
				printf("ipfw: invalid fib number %d\n",
				    cmd->arg1 & 0x7FFF);
				return EINVAL;
			}
			goto check_action;

		case O_UID:
		case O_GID:
		case O_JAIL:
		case O_IP_SRC:
		case O_IP_DST:
		case O_TCPSEQ:
		case O_TCPACK:
		case O_PROB:
		case O_ICMPTYPE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			break;

		case O_LIMIT:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
				goto bad_size;
			ci->object_opcodes++;
			break;

		case O_LOG:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
				goto bad_size;

			/* Start with a full logging budget. */
			((ipfw_insn_log *)cmd)->log_left =
			    ((ipfw_insn_log *)cmd)->max_log;

			break;

		case O_IP_SRC_MASK:
		case O_IP_DST_MASK:
			/* only odd command lengths */
			if ((cmdlen & 1) == 0)
				goto bad_size;
			break;

		case O_IP_SRC_SET:
		case O_IP_DST_SET:
			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
				printf("ipfw: invalid set size %d\n",
				    cmd->arg1);
				return EINVAL;
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
			    (cmd->arg1+31)/32 )
				goto bad_size;
			break;

		case O_IP_SRC_LOOKUP:
			if (cmdlen > F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			/* FALLTHROUGH */
		case O_IP_DST_LOOKUP:
			if (cmd->arg1 >= V_fw_tables_max) {
				printf("ipfw: invalid table number %d\n",
				    cmd->arg1);
				return (EINVAL);
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
			    cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			ci->object_opcodes++;
			break;
		case O_IP_FLOW_LOOKUP:
			if (cmd->arg1 >= V_fw_tables_max) {
				printf("ipfw: invalid table number %d\n",
				    cmd->arg1);
				return (EINVAL);
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			ci->object_opcodes++;
			break;
		case O_MACADDR2:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
				goto bad_size;
			break;

		case O_NOP:
		case O_IPID:
		case O_IPTTL:
		case O_IPLEN:
		case O_TCPDATALEN:
		case O_TCPWIN:
		case O_TAGGED:
			if (cmdlen < 1 || cmdlen > 31)
				goto bad_size;
			break;

		case O_DSCP:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
				goto bad_size;
			break;

		case O_MAC_TYPE:
		case O_IP_SRCPORT:
		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
			if (cmdlen < 2 || cmdlen > 31)
				goto bad_size;
			break;

		case O_RECV:
		case O_XMIT:
		case O_VIA:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
				goto bad_size;
			ci->object_opcodes++;
			break;

		case O_ALTQ:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
				goto bad_size;
			break;

		case O_PIPE:
		case O_QUEUE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			goto check_action;

		case O_FORWARD_IP:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
				goto bad_size;
			goto check_action;
#ifdef INET6
		case O_FORWARD_IP6:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
				goto bad_size;
			goto check_action;
#endif /* INET6 */

		case O_DIVERT:
		case O_TEE:
			/* Require the divert module to be loaded. */
			if (ip_divert_ptr == NULL)
				return EINVAL;
			else
				goto check_size;
		case O_NETGRAPH:
		case O_NGTEE:
			/* Require the ng_ipfw module to be loaded. */
			if (ng_ipfw_input_p == NULL)
				return EINVAL;
			else
				goto check_size;
		case O_NAT:
			if (!IPFW_NAT_LOADED)
				return EINVAL;
			if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
 				goto bad_size;
			goto check_action;
		case O_CHECK_STATE:
			ci->object_opcodes++;
			/* FALLTHROUGH */
		case O_FORWARD_MAC: /* XXX not implemented yet */
		case O_COUNT:
		case O_ACCEPT:
		case O_DENY:
		case O_REJECT:
		case O_SETDSCP:
#ifdef INET6
		case O_UNREACH6:
#endif
		case O_SKIPTO:
		case O_REASS:
		case O_CALLRETURN:
check_size:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
check_action:
			if (have_action) {
				printf("ipfw: opcode %d, multiple actions"
					" not allowed\n",
					cmd->opcode);
				return (EINVAL);
			}
			have_action = 1;
			if (l != cmdlen) {
				printf("ipfw: opcode %d, action must be"
					" last opcode\n",
					cmd->opcode);
				return (EINVAL);
			}
			break;
#ifdef INET6
		case O_IP6_SRC:
		case O_IP6_DST:
			if (cmdlen != F_INSN_SIZE(struct in6_addr) +
			    F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			break;

		case O_FLOW6ID:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
			    ((ipfw_insn_u32 *)cmd)->o.arg1)
				goto bad_size;
			break;

		case O_IP6_SRC_MASK:
		case O_IP6_DST_MASK:
			if ( !(cmdlen & 1) || cmdlen > 127)
				goto bad_size;
			break;
		case O_ICMP6TYPE:
			if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
				goto bad_size;
			break;
#endif

		default:
			switch (cmd->opcode) {
#ifndef INET6
			case O_IP6_SRC_ME:
			case O_IP6_DST_ME:
			case O_EXT_HDR:
			case O_IP6:
			case O_UNREACH6:
			case O_IP6_SRC:
			case O_IP6_DST:
			case O_FLOW6ID:
			case O_IP6_SRC_MASK:
			case O_IP6_DST_MASK:
			case O_ICMP6TYPE:
				printf("ipfw: no IPv6 support in kernel\n");
				return (EPROTONOSUPPORT);
#endif
			default:
				printf("ipfw: opcode %d, unknown opcode\n",
					cmd->opcode);
				return (EINVAL);
			}
		}
	}
	if (have_action == 0) {
		printf("ipfw: missing action\n");
		return (EINVAL);
	}
	return 0;

bad_size:
	printf("ipfw: opcode %d size %d wrong\n",
		cmd->opcode, cmdlen);
	return (EINVAL);
}


/*
 * Translation of requests for compatibility with FreeBSD 7.2/8.
 * a static variable tells us if we have an old client from userland,
 * and if necessary we translate requests and responses between the
 * two formats.
 */
static int is7 = 0;

struct ip_fw7 {
	struct ip_fw7	*next;		/* linked list of rules */
	struct ip_fw7	*next_rule;	/* ptr to next [skipto] rule */
	/* 'next_rule' is used to pass up 'set_disable' status */

	uint16_t	act_ofs;	/* offset of action in 32-bit units */
	uint16_t	cmd_len;	/* # of 32-bit words in cmd */
	uint16_t	rulenum;	/* rule number */
	uint8_t		set;		/* rule set (0..31) */
	// #define RESVD_SET	31	/* set for default and persistent rules */
	uint8_t		_pad;		/* padding */
	// uint32_t	id;		/* rule id, only in v.8 */
	/* These fields are present in all rules.
 */
	uint64_t	pcnt;		/* Packet counter */
	uint64_t	bcnt;		/* Byte counter */
	uint32_t	timestamp;	/* tv_sec of last match */

	ipfw_insn	cmd[1];		/* storage for commands */
};

static int convert_rule_to_7(struct ip_fw_rule0 *rule);
static int convert_rule_to_8(struct ip_fw_rule0 *rule);

#ifndef RULESIZE7
#define	RULESIZE7(rule)	(sizeof(struct ip_fw7) + \
	((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
#endif


/*
 * Copy the static and dynamic rules to the supplied buffer
 * and return the amount of space actually used.
 * Must be run under IPFW_UH_RLOCK
 */
static size_t
ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
{
	char *bp = buf;
	char *ep = bp + space;
	struct ip_fw *rule;
	struct ip_fw_rule0 *dst;
	struct timeval boottime;
	int error, i, l, warnflag;
	time_t	boot_seconds;

	warnflag = 0;

	getboottime(&boottime);
	boot_seconds = boottime.tv_sec;
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];

		if (is7) {
			/* Convert rule to FreeBSD 7.2 format */
			l = RULESIZE7(rule);
			if (bp + l + sizeof(uint32_t) <= ep) {
				bcopy(rule, bp, l + sizeof(uint32_t));
				error = set_legacy_obj_kidx(chain,
				    (struct ip_fw_rule0 *)bp);
				if (error != 0)
					return (0);
				error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
				if (error)
					return 0; /*XXX correct? */
				/*
				 * XXX HACK. Store the disable mask in the "next"
				 * pointer in a wild attempt to keep the ABI the same.
				 * Why do we do this on EVERY rule?
				 */
				bcopy(&V_set_disable,
				    &(((struct ip_fw7 *)bp)->next_rule),
				    sizeof(V_set_disable));
				if (((struct ip_fw7 *)bp)->timestamp)
					((struct ip_fw7 *)bp)->timestamp += boot_seconds;
				bp += l;
			}
			continue; /* go to next rule */
		}

		l = RULEUSIZE0(rule);
		if (bp + l > ep) { /* should not happen */
			printf("overflow dumping static rules\n");
			break;
		}
		dst = (struct ip_fw_rule0 *)bp;
		export_rule0(rule, dst, l);
		error = set_legacy_obj_kidx(chain, dst);

		/*
		 * XXX HACK. Store the disable mask in the "next"
		 * pointer in a wild attempt to keep the ABI the same.
		 * Why do we do this on EVERY rule?
		 *
		 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
		 * so we need to fail _after_ saving at least one mask.
		 */
		bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
		if (dst->timestamp)
			dst->timestamp += boot_seconds;
		bp += l;

		if (error != 0) {
			if (error == 2) {
				/* Non-fatal table rewrite error. */
				warnflag = 1;
				continue;
			}
			printf("Stop on rule %d. Fail to convert table\n",
			    rule->rulenum);
			break;
		}
	}
	if (warnflag != 0)
		printf("ipfw: process %s is using legacy interfaces,"
		    " consider rebuilding\n", "");
	ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
	return (bp - (char *)buf);
}


/* Parameters accumulated while sizing and emitting a config dump. */
struct dump_args {
	uint32_t	b;	/* start rule */
	uint32_t	e;	/* end rule */
	uint32_t	rcount;	/* number of rules */
	uint32_t	rsize;	/* rules size */
	uint32_t	tcount;	/* number of tables */
	int		rcounters;	/* counters */
	uint32_t	*bmask;	/* index bitmask of used named objects */
};

/*
 * Fills @ntlv with the name/type/kernel-index of named object @no.
 */
void
ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
{

	ntlv->head.type = no->etlv;
	ntlv->head.length = sizeof(*ntlv);
	ntlv->idx = no->kidx;
	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
}

/*
 * Export named object info in instance @ni, identified by @kidx
 * to ipfw_obj_ntlv. TLV is allocated from @sd space.
 *
 * Returns 0 on success.
 */
static int
export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
    struct sockopt_data *sd)
{
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;

	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("invalid object kernel index passed"));

	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);

	ipfw_export_obj_ntlv(no, ntlv);
	return (0);
}

/*
 * Exports name TLVs for every object whose bit is set in @da->bmask
 * from hash instance @ni into @sd, decrementing @da->tcount for each
 * exported object.
 *
 * Returns 0 on success or ENOMEM if @sd space is exhausted.
 */
static int
export_named_objects(struct namedobj_instance *ni, struct dump_args *da,
    struct sockopt_data *sd)
{
	int error, i;

	for (i = 0; i < IPFW_TABLES_MAX && da->tcount > 0; i++) {
		if ((da->bmask[i / 32] & (1 << (i % 32))) == 0)
			continue;
		if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
			return (error);
		da->tcount--;
	}
	return (0);
}

/*
 * Emits an IPFW_TLV_TBLNAME_LIST header followed by name TLVs for all
 * referenced named objects: table objects first, then the remaining
 * (service) objects from the second half of @da->bmask.
 *
 * Returns 0 on success.
 */
static int
dump_named_objects(struct ip_fw_chain *ch, struct dump_args *da,
    struct sockopt_data *sd)
{
	ipfw_obj_ctlv *ctlv;
	int error;

	MPASS(da->tcount > 0);
	/* Header first */
	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
	if (ctlv == NULL)
		return (ENOMEM);
	ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
	ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
	    sizeof(*ctlv);
	ctlv->count = da->tcount;
	ctlv->objsize = sizeof(ipfw_obj_ntlv);

	/* Dump table names first (if any) */
	error = export_named_objects(ipfw_get_table_objhash(ch), da, sd);
	if (error != 0)
		return (error);
	/* Then dump another named objects */
	da->bmask += IPFW_TABLES_MAX / 32;
	return (export_named_objects(CHAIN_TO_SRV(ch), da, sd));
}

/*
 * Dumps static rules with table TLVs in buffer @sd.
 *
 * Returns 0 on success.
 */
static int
dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
    struct sockopt_data *sd)
{
	ipfw_obj_ctlv *ctlv;
	struct ip_fw *krule;
	caddr_t dst;
	int i, l;

	/* Dump rules */
	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
	if (ctlv == NULL)
		return (ENOMEM);
	ctlv->head.type = IPFW_TLV_RULE_LIST;
	ctlv->head.length = da->rsize + sizeof(*ctlv);
	ctlv->count = da->rcount;

	for (i = da->b; i < da->e; i++) {
		krule = chain->map[i];

		l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
		if (da->rcounters != 0)
			l += sizeof(struct ip_fw_bcounter);
		dst = (caddr_t)ipfw_get_sopt_space(sd, l);
		if (dst == NULL)
			return (ENOMEM);

		export_rule1(krule, dst, l, da->rcounters);
	}

	return (0);
}

/*
 * Sets the bit for (@etlv, @kidx) in @bmask.
 *
 * Returns 1 if the bit was newly set (first reference), 0 if it was
 * already set.
 */
int
ipfw_mark_object_kidx(uint32_t *bmask, uint16_t etlv, uint16_t kidx)
{
	uint32_t bidx;

	/*
	 * Maintain separate bitmasks for table and non-table objects.
	 */
	bidx = (etlv == IPFW_TLV_TBL_NAME) ? 0: IPFW_TABLES_MAX / 32;
	bidx += kidx / 32;
	if ((bmask[bidx] & (1 << (kidx % 32))) != 0)
		return (0);

	bmask[bidx] |= 1 << (kidx % 32);
	return (1);
}

/*
 * Marks every object index used in @rule with bit in @bmask.
 * Used to generate bitmask of referenced tables/objects for given ruleset
 * or its part.
 */
static void
mark_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
    struct dump_args *da)
{
	struct opcode_obj_rewrite *rw;
	ipfw_insn *cmd;
	int cmdlen, l;
	uint16_t kidx;
	uint8_t subtype;

	/* Walk the opcode stream; F_LEN() yields each instruction's length. */
	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		/* Only opcodes with a registered rewriter reference objects. */
		rw = find_op_rw(cmd, &kidx, &subtype);
		if (rw == NULL)
			continue;

		/* Count each distinct object only once. */
		if (ipfw_mark_object_kidx(da->bmask, rw->etlv, kidx))
			da->tcount++;
	}
}

/*
 * Dumps requested objects data
 * Data layout (version 0)(current):
 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
 *   size = ipfw_cfg_lheader.size
 * Reply: [ ipfw_cfg_lheader
 *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
 *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
 *     ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
 *   ] (optional)
 *   [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
 * ]
 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
 * The rest (size, count) are set to zero and needs to be ignored.
 *
 * Returns 0 on success.
 */
static int
dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct dump_args da;
	ipfw_cfg_lheader *hdr;
	struct ip_fw *rule;
	size_t sz, rnum;
	uint32_t hdr_flags, *bmask;
	int error, i;

	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
	if (hdr == NULL)
		return (EINVAL);

	error = 0;
	bmask = NULL;
	memset(&da, 0, sizeof(da));
	/*
	 * Allocate needed state.
	 * Note we allocate 2x space mask: first half for tables,
	 * second half for other (srv) named objects.
	 */
	if (hdr->flags & (IPFW_CFG_GET_STATIC | IPFW_CFG_GET_STATES))
		da.bmask = bmask = malloc(
		    sizeof(uint32_t) * IPFW_TABLES_MAX * 2 / 32, M_TEMP,
		    M_WAITOK | M_ZERO);
	IPFW_UH_RLOCK(chain);

	/*
	 * STAGE 1: Determine size/count for objects in range.
	 * Prepare used tables bitmask.
	 */
	sz = sizeof(ipfw_cfg_lheader);
	da.e = chain->n_rules;

	if (hdr->end_rule != 0) {
		/* Handle custom range: clamp both ends to the default rule. */
		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
			rnum = IPFW_DEFAULT_RULE;
		da.b = ipfw_find_rule(chain, rnum, 0);
		rnum = (hdr->end_rule < IPFW_DEFAULT_RULE) ?
		    hdr->end_rule + 1: IPFW_DEFAULT_RULE;
		da.e = ipfw_find_rule(chain, rnum, UINT32_MAX) + 1;
	}

	if (hdr->flags & IPFW_CFG_GET_STATIC) {
		for (i = da.b; i < da.e; i++) {
			rule = chain->map[i];
			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
			da.rcount++;
			/* Update bitmask of used objects for given range */
			mark_rule_objects(chain, rule, &da);
		}
		/* Add counters if requested */
		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
			da.rcounters = 1;
		}
		sz += da.rsize + sizeof(ipfw_obj_ctlv);
	}

	if (hdr->flags & IPFW_CFG_GET_STATES) {
		/* ipfw_dyn_get_count() also reports referenced objects in i. */
		sz += sizeof(ipfw_obj_ctlv) +
		    ipfw_dyn_get_count(bmask, &i) * sizeof(ipfw_obj_dyntlv);
		da.tcount += i;
	}

	if (da.tcount > 0)
		sz += da.tcount * sizeof(ipfw_obj_ntlv) +
		    sizeof(ipfw_obj_ctlv);

	/*
	 * Fill header anyway.
	 * Note we have to save header fields to stable storage:
	 * the buffer inside @sd can be flushed after dumping rules,
	 * invalidating @hdr.
	 */
	hdr->size = sz;
	hdr->set_mask = ~V_set_disable;
	hdr_flags = hdr->flags;
	hdr = NULL;

	if (sd->valsize < sz) {
		error = ENOMEM;
		goto cleanup;
	}

	/* STAGE2: Store actual data */
	if (da.tcount > 0) {
		error = dump_named_objects(chain, &da, sd);
		if (error != 0)
			goto cleanup;
	}

	if (hdr_flags & IPFW_CFG_GET_STATIC) {
		error = dump_static_rules(chain, &da, sd);
		if (error != 0)
			goto cleanup;
	}

	if (hdr_flags & IPFW_CFG_GET_STATES)
		error = ipfw_dump_states(chain, sd);

cleanup:
	IPFW_UH_RUNLOCK(chain);

	if (bmask != NULL)
		free(bmask, M_TEMP);

	return (error);
}

/*
 * Validates a named-object name: it must be non-empty and short enough
 * to fit (NUL-terminated) into ipfw_obj_ntlv.name.
 *
 * Returns 0 if the name is acceptable, EINVAL otherwise.
 */
int
ipfw_check_object_name_generic(const char *name)
{
	int nsize;

	nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
	if (strnlen(name, nsize) == nsize)
		return (EINVAL);
	if (name[0] == '\0')
		return (EINVAL);
	return (0);
}

/*
 * Creates non-existent objects referenced by rule.
 *
 * Return 0 on success.
 */
int
create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
    struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
{
	struct opcode_obj_rewrite *rw;
	struct obj_idx *p;
	uint16_t kidx;
	int error;

	/*
	 * Compatibility stuff: do actual creation for non-existing,
	 * but referenced objects.  Entries with kidx == 0 in [oib, pidx)
	 * are the ones left unresolved by ref_rule_objects().
	 */
	for (p = oib; p < pidx; p++) {
		if (p->kidx != 0)
			continue;

		ti->uidx = p->uidx;
		ti->type = p->type;
		ti->atype = 0;

		rw = find_op_rw(cmd + p->off, NULL, NULL);
		KASSERT(rw != NULL, ("Unable to find handler for op %d",
		    (cmd + p->off)->opcode));

		if (rw->create_object == NULL)
			error = EOPNOTSUPP;
		else
			error = rw->create_object(ch, ti, &kidx);
		if (error == 0) {
			p->kidx = kidx;
			continue;
		}

		/*
		 * Error happened. We have to rollback everything.
		 * Drop all already acquired references.
		 */
		IPFW_UH_WLOCK(ch);
		unref_oib_objects(ch, cmd, oib, pidx);
		IPFW_UH_WUNLOCK(ch);

		return (error);
	}

	return (0);
}

/*
 * Compatibility function for old ipfw(8) binaries.
 * Rewrites table/nat kernel indices with userland ones.
 * Converts tables matching '/^\d+$/' to their atoi() value.
 * Uses number 65535 for other tables.
 *
 * Returns 0 on success.
 */
static int
set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
{
	struct opcode_obj_rewrite *rw;
	struct named_object *no;
	ipfw_insn *cmd;
	char *end;
	long val;
	int cmdlen, error, l;
	uint16_t kidx, uidx;
	uint8_t subtype;

	error = 0;

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		/* Check if is index in given opcode */
		rw = find_op_rw(cmd, &kidx, &subtype);
		if (rw == NULL)
			continue;

		/* Try to find referenced kernel object */
		no = rw->find_bykidx(ch, kidx);
		if (no == NULL)
			continue;

		/* Purely-numeric names are exported as their numeric value. */
		val = strtol(no->name, &end, 10);
		if (*end == '\0' && val < 65535) {
			uidx = val;
		} else {

			/*
			 * We are called via legacy opcode.
			 * Save error and show table as fake number
			 * not to make ipfw(8) hang.
			 */
			uidx = 65535;
			error = 2;
		}

		rw->update(cmd, uidx);
	}

	return (error);
}


/*
 * Unreferences all already-referenced objects in given @cmd rule,
 * using information in @oib.
 *
 * Used to rollback partially converted rule on error.
 */
static void
unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
    struct obj_idx *end)
{
	struct opcode_obj_rewrite *rw;
	struct named_object *no;
	struct obj_idx *p;

	IPFW_UH_WLOCK_ASSERT(ch);

	for (p = oib; p < end; p++) {
		/* kidx == 0 means the object was never resolved/referenced. */
		if (p->kidx == 0)
			continue;

		rw = find_op_rw(cmd + p->off, NULL, NULL);
		KASSERT(rw != NULL, ("Unable to find handler for op %d",
		    (cmd + p->off)->opcode));

		/* Find & unref by existing idx */
		no = rw->find_bykidx(ch, p->kidx);
		KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
		no->refcnt--;
	}
}

/*
 * Remove references from every object used in @rule.
 * Used at rule removal code.
 */
static void
unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
{
	struct opcode_obj_rewrite *rw;
	struct named_object *no;
	ipfw_insn *cmd;
	int cmdlen, l;
	uint16_t kidx;
	uint8_t subtype;

	IPFW_UH_WLOCK_ASSERT(ch);

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		rw = find_op_rw(cmd, &kidx, &subtype);
		if (rw == NULL)
			continue;
		no = rw->find_bykidx(ch, kidx);

		KASSERT(no != NULL, ("object id %d not found", kidx));
		KASSERT(no->subtype == subtype,
		    ("wrong type %d (%d) for object id %d",
		    no->subtype, subtype, kidx));
		KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
		    kidx, no->refcnt));

		/* Last reference: destroy the object if the rewriter can. */
		if (no->refcnt == 1 && rw->destroy_object != NULL)
			rw->destroy_object(ch, no);
		else
			no->refcnt--;
	}
}


/*
 * Find and reference object (if any) stored in instruction @cmd.
 *
 * Saves object info in @pidx, sets
 *  - @unresolved to 1 if object should exist but was not found
 *
 * Returns non-zero value in case of error.
 */
static int
ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
    struct obj_idx *pidx, int *unresolved)
{
	struct named_object *no;
	struct opcode_obj_rewrite *rw;
	int error;

	/* Check if this opcode is candidate for rewrite */
	rw = find_op_rw(cmd, &ti->uidx, &ti->type);
	if (rw == NULL)
		return (0);

	/* Need to rewrite. Save necessary fields */
	pidx->uidx = ti->uidx;
	pidx->type = ti->type;

	/* Try to find referenced kernel object */
	error = rw->find_byname(ch, ti, &no);
	if (error != 0)
		return (error);
	if (no == NULL) {
		/*
		 * Report about unresolved object for automatic
		 * creation.
		 */
		*unresolved = 1;
		return (0);
	}

	/*
	 * Object already exists.
	 * Its subtype should match the expected value.
	 */
	if (ti->type != no->subtype)
		return (EINVAL);

	/* Bump refcount and update kidx. */
	no->refcnt++;
	rw->update(cmd, no->kidx);
	return (0);
}

/*
 * Finds and bumps refcount for objects referenced by given @rule.
 * Auto-creates non-existing tables.
 * Fills in @oib array with userland/kernel indexes.
 *
 * Returns 0 on success.
 */
static int
ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
    struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
{
	struct obj_idx *pidx;
	ipfw_insn *cmd;
	int cmdlen, error, l, unresolved;

	pidx = oib;
	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	error = 0;

	IPFW_UH_WLOCK(ch);

	/* Increase refcount on each existing referenced table. */
	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		unresolved = 0;

		error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
		if (error != 0)
			break;
		/*
		 * Compatibility stuff for old clients:
		 * prepare to automatically create non-existing objects.
		 * Record the opcode offset so create_objects_compat()
		 * can find the instruction later.
		 */
		if (unresolved != 0) {
			pidx->off = rule->cmd_len - l;
			pidx++;
		}
	}

	if (error != 0) {
		/* Unref everything we have already done */
		unref_oib_objects(ch, rule->cmd, oib, pidx);
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	IPFW_UH_WUNLOCK(ch);

	/* Perform auto-creation for non-existing objects */
	if (pidx != oib)
		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);

	/* Calculate real number of dynamic objects */
	ci->object_opcodes = (uint16_t)(pidx - oib);

	return (error);
}

/*
 * Checks if opcode is referencing table of appropriate type.
 * Adds reference count for found table if true.
 * Rewrites user-supplied opcode values with kernel ones.
 *
 * Returns 0 on success and appropriate error code otherwise.
 */
static int
rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
{
	int error;
	ipfw_insn *cmd;
	uint8_t type;
	struct obj_idx *p, *pidx_first, *pidx_last;
	struct tid_info ti;

	/*
	 * Prepare an array for storing opcode indices.
	 * Use stack allocation by default.
	 */
	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
		/* Stack */
		pidx_first = ci->obuf;
	} else
		pidx_first = malloc(
		    ci->object_opcodes * sizeof(struct obj_idx),
		    M_IPFW, M_WAITOK | M_ZERO);

	error = 0;
	type = 0;
	memset(&ti, 0, sizeof(ti));

	/* Use set rule is assigned to. */
	ti.set = ci->krule->set;
	if (ci->ctlv != NULL) {
		/* Name TLVs (if any) immediately follow the ctlv header. */
		ti.tlvs = (void *)(ci->ctlv + 1);
		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
	}

	/* Reference all used tables and other objects */
	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
	if (error != 0)
		goto free;
	/*
	 * Note that ref_rule_objects() might have updated ci->object_opcodes
	 * to reflect actual number of object opcodes.
	 */

	/* Perform rewrite of remaining opcodes */
	p = pidx_first;
	pidx_last = pidx_first + ci->object_opcodes;
	for (p = pidx_first; p < pidx_last; p++) {
		cmd = ci->krule->cmd + p->off;
		update_opcode_kidx(cmd, p->kidx);
	}

free:
	if (pidx_first != ci->obuf)
		free(pidx_first, M_IPFW);

	return (error);
}

/*
 * Adds one or more rules to ipfw @chain.
 * Data layout (version 0)(current):
 * Request:
 * [
 *   ip_fw3_opheader
 *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
 *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
 * ]
 * Reply:
 * [
 *   ip_fw3_opheader
 *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
 *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
 * ]
 *
 * Rules in reply are modified to store their actual ruleset number.
 *
 * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
 * according to their idx field and there has to be no duplicates.
 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
 * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
 *
 * Returns 0 on success.
 */
static int
add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
	ipfw_obj_ntlv *ntlv;
	int clen, error, idx;
	uint32_t count, read;
	struct ip_fw_rule *r;
	struct rule_check_info rci, *ci, *cbuf;
	int i, rsize;

	op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
	ctlv = (ipfw_obj_ctlv *)(op3 + 1);

	/* @read tracks how many request bytes have been consumed so far. */
	read = sizeof(ip_fw3_opheader);
	rtlv = NULL;
	tstate = NULL;
	cbuf = NULL;
	memset(&rci, 0, sizeof(struct rule_check_info));

	if (read + sizeof(*ctlv) > sd->valsize)
		return (EINVAL);

	if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
		clen = ctlv->head.length;
		/* Check size and alignment */
		if (clen > sd->valsize || clen < sizeof(*ctlv))
			return (EINVAL);
		if ((clen % sizeof(uint64_t)) != 0)
			return (EINVAL);

		/*
		 * Some table names or other named objects.
		 * Check for validness.
		 */
		count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
		if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
			return (EINVAL);

		/*
		 * Check each TLV.
		 * Ensure TLVs are sorted ascending and
		 * there are no duplicates.
		 */
		idx = -1;
		ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
		while (count > 0) {
			if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
				return (EINVAL);

			error = ipfw_check_object_name_generic(ntlv->name);
			if (error != 0)
				return (error);

			/* Strictly ascending idx: sorted and duplicate-free. */
			if (ntlv->idx <= idx)
				return (EINVAL);

			idx = ntlv->idx;
			count--;
			ntlv++;
		}

		/* Remember name-TLV block; rule checks resolve names in it. */
		tstate = ctlv;
		read += ctlv->head.length;
		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
	}

	if (read + sizeof(*ctlv) > sd->valsize)
		return (EINVAL);

	if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
		clen = ctlv->head.length;
		if (clen + read > sd->valsize || clen < sizeof(*ctlv))
			return (EINVAL);
		if ((clen % sizeof(uint64_t)) != 0)
			return (EINVAL);

		/*
		 * TODO: Permit adding multiple rules at once
		 */
		if (ctlv->count != 1)
			return (ENOTSUP);

		clen -= sizeof(*ctlv);

		if (ctlv->count > clen / sizeof(struct ip_fw_rule))
			return (EINVAL);

		/* Allocate state for each rule or use stack */
		if (ctlv->count == 1) {
			memset(&rci, 0, sizeof(struct rule_check_info));
			cbuf = &rci;
		} else
			cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
			    M_WAITOK | M_ZERO);
		ci = cbuf;

		/*
		 * Check each rule for validness.
		 * Ensure numbered rules are sorted ascending
		 * and properly aligned
		 */
		idx = 0;
		r = (struct ip_fw_rule *)(ctlv + 1);
		count = 0;
		error = 0;
		while (clen > 0) {
			rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
			if (rsize > clen || ctlv->count <= count) {
				error = EINVAL;
				break;
			}

			ci->ctlv = tstate;
			error = check_ipfw_rule1(r, rsize, ci);
			if (error != 0)
				break;

			/* Check sorting */
			if (r->rulenum != 0 && r->rulenum < idx) {
				printf("rulenum %d idx %d\n", r->rulenum, idx);
				error = EINVAL;
				break;
			}
			idx = r->rulenum;

			ci->urule = (caddr_t)r;

			rsize = roundup2(rsize, sizeof(uint64_t));
			clen -= rsize;
			r = (struct ip_fw_rule *)((caddr_t)r + rsize);
			count++;
			ci++;
		}

		if (ctlv->count != count || error != 0) {
			if (cbuf != &rci)
				free(cbuf, M_TEMP);
			return (EINVAL);
		}

		rtlv = ctlv;
		read += ctlv->head.length;
		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
	}

	/* The request must be fully consumed and contain at least one rule. */
	if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
		if (cbuf != NULL && cbuf != &rci)
			free(cbuf, M_TEMP);
		return (EINVAL);
	}

	/*
	 * Passed rules seem to be valid.
	 * Allocate storage and try to add them to chain.
	 */
	for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
		clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
		ci->krule = ipfw_alloc_rule(chain, clen);
		import_rule1(ci);
	}

	if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
		/* Free allocated krules */
		for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
			ipfw_free_rule(ci->krule);
	}

	if (cbuf != NULL && cbuf != &rci)
		free(cbuf, M_TEMP);

	return (error);
}

/*
 * Lists all sopts currently registered.
3071 * Data layout (v0)(current): 3072 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 3073 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ] 3074 * 3075 * Returns 0 on success 3076 */ 3077static int 3078dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 3079 struct sockopt_data *sd) 3080{ 3081 struct _ipfw_obj_lheader *olh; 3082 ipfw_sopt_info *i; 3083 struct ipfw_sopt_handler *sh; 3084 uint32_t count, n, size; 3085 3086 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 3087 if (olh == NULL) 3088 return (EINVAL); 3089 if (sd->valsize < olh->size) 3090 return (EINVAL); 3091 3092 CTL3_LOCK(); 3093 count = ctl3_hsize; 3094 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader); 3095 3096 /* Fill in header regadless of buffer size */ 3097 olh->count = count; 3098 olh->objsize = sizeof(ipfw_sopt_info); 3099 3100 if (size > olh->size) { 3101 olh->size = size; 3102 CTL3_UNLOCK(); 3103 return (ENOMEM); 3104 } 3105 olh->size = size; 3106 3107 for (n = 1; n <= count; n++) { 3108 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 3109 KASSERT(i != NULL, ("previously checked buffer is not enough")); 3110 sh = &ctl3_handlers[n]; 3111 i->opcode = sh->opcode; 3112 i->version = sh->version; 3113 i->refcnt = sh->refcnt; 3114 } 3115 CTL3_UNLOCK(); 3116 3117 return (0); 3118} 3119 3120/* 3121 * Compares two opcodes. 3122 * Used both in qsort() and bsearch(). 3123 * 3124 * Returns 0 if match is found. 
3125 */ 3126static int 3127compare_opcodes(const void *_a, const void *_b) 3128{ 3129 const struct opcode_obj_rewrite *a, *b; 3130 3131 a = (const struct opcode_obj_rewrite *)_a; 3132 b = (const struct opcode_obj_rewrite *)_b; 3133 3134 if (a->opcode < b->opcode) 3135 return (-1); 3136 else if (a->opcode > b->opcode) 3137 return (1); 3138 3139 return (0); 3140} 3141 3142/* 3143 * XXX: Rewrite bsearch() 3144 */ 3145static int 3146find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo, 3147 struct opcode_obj_rewrite **phi) 3148{ 3149 struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw; 3150 3151 memset(&h, 0, sizeof(h)); 3152 h.opcode = op; 3153 3154 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters, 3155 ctl3_rsize, sizeof(h), compare_opcodes); 3156 if (rw == NULL) 3157 return (1); 3158 3159 /* Find the first element matching the same opcode */ 3160 lo = rw; 3161 for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--) 3162 ; 3163 3164 /* Find the last element matching the same opcode */ 3165 hi = rw; 3166 ctl3_max = ctl3_rewriters + ctl3_rsize; 3167 for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++) 3168 ; 3169 3170 *plo = lo; 3171 *phi = hi; 3172 3173 return (0); 3174} 3175 3176/* 3177 * Finds opcode object rewriter based on @code. 3178 * 3179 * Returns pointer to handler or NULL. 
3180 */ 3181static struct opcode_obj_rewrite * 3182find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 3183{ 3184 struct opcode_obj_rewrite *rw, *lo, *hi; 3185 uint16_t uidx; 3186 uint8_t subtype; 3187 3188 if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0) 3189 return (NULL); 3190 3191 for (rw = lo; rw <= hi; rw++) { 3192 if (rw->classifier(cmd, &uidx, &subtype) == 0) { 3193 if (puidx != NULL) 3194 *puidx = uidx; 3195 if (ptype != NULL) 3196 *ptype = subtype; 3197 return (rw); 3198 } 3199 } 3200 3201 return (NULL); 3202} 3203int 3204classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx) 3205{ 3206 3207 if (find_op_rw(cmd, puidx, NULL) == NULL) 3208 return (1); 3209 return (0); 3210} 3211 3212void 3213update_opcode_kidx(ipfw_insn *cmd, uint16_t idx) 3214{ 3215 struct opcode_obj_rewrite *rw; 3216 3217 rw = find_op_rw(cmd, NULL, NULL); 3218 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode)); 3219 rw->update(cmd, idx); 3220} 3221 3222void 3223ipfw_init_obj_rewriter() 3224{ 3225 3226 ctl3_rewriters = NULL; 3227 ctl3_rsize = 0; 3228} 3229 3230void 3231ipfw_destroy_obj_rewriter() 3232{ 3233 3234 if (ctl3_rewriters != NULL) 3235 free(ctl3_rewriters, M_IPFW); 3236 ctl3_rewriters = NULL; 3237 ctl3_rsize = 0; 3238} 3239 3240/* 3241 * Adds one or more opcode object rewrite handlers to the global array. 3242 * Function may sleep. 
 */
void
ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
{
	size_t sz;
	struct opcode_obj_rewrite *tmp;

	CTL3_LOCK();

	/*
	 * The lock has to be dropped around the sleeping malloc(); other
	 * registrations may race in meanwhile, so re-check the required
	 * size after re-acquiring the lock and retry if it grew.
	 */
	for (;;) {
		sz = ctl3_rsize + count;
		CTL3_UNLOCK();
		tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
		CTL3_LOCK();
		if (ctl3_rsize + count <= sz)
			break;

		/* Retry */
		free(tmp, M_IPFW);
	}

	/* Merge old & new arrays */
	sz = ctl3_rsize + count;
	memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
	memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
	qsort(tmp, sz, sizeof(*rw), compare_opcodes);
	/* Switch new and free old */
	if (ctl3_rewriters != NULL)
		free(ctl3_rewriters, M_IPFW);
	ctl3_rewriters = tmp;
	ctl3_rsize = sz;

	CTL3_UNLOCK();
}

/*
 * Removes one or more object rewrite handlers from the global array.
 */
int
ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
{
	size_t sz;
	struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
	int i;

	CTL3_LOCK();

	for (i = 0; i < count; i++) {
		if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
			continue;

		/* Match entries by classifier callback, not by opcode only. */
		for (ktmp = lo; ktmp <= hi; ktmp++) {
			if (ktmp->classifier != rw[i].classifier)
				continue;

			/* Compact the array over the removed entry. */
			ctl3_max = ctl3_rewriters + ctl3_rsize;
			sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
			memmove(ktmp, ktmp + 1, sz);
			ctl3_rsize--;
			break;
		}

	}

	if (ctl3_rsize == 0) {
		if (ctl3_rewriters != NULL)
			free(ctl3_rewriters, M_IPFW);
		ctl3_rewriters = NULL;
	}

	CTL3_UNLOCK();

	return (0);
}

/*
 * ipfw_objhash_foreach() callback: exports one named object as an
 * ipfw_obj_ntlv into the sockopt buffer passed via @arg.
 *
 * Returns 0 on success, ENOMEM if the buffer is exhausted.
 */
static int
export_objhash_ntlv_internal(struct namedobj_instance *ni,
    struct named_object *no, void *arg)
{
	struct sockopt_data *sd;
	ipfw_obj_ntlv *ntlv;

	sd = (struct sockopt_data *)arg;
	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);
	ipfw_export_obj_ntlv(no, ntlv);
	return (0);
}

/*
 * Lists all service objects.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
 * Returns 0 on success
 */
static int
dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *hdr;
	int count;

	hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
	if (hdr == NULL)
		return (EINVAL);

	IPFW_UH_RLOCK(chain);
	count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
	hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
	if (sd->valsize < hdr->size) {
		/* Undersized buffer; hdr->size tells the caller what to use. */
		IPFW_UH_RUNLOCK(chain);
		return (ENOMEM);
	}
	hdr->count = count;
	hdr->objsize = sizeof(ipfw_obj_ntlv);
	if (count > 0)
		ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
		    export_objhash_ntlv_internal, sd);
	IPFW_UH_RUNLOCK(chain);
	return (0);
}

/*
 * Compares two sopt handlers (code, version and handler ptr).
 * Used both as qsort() and bsearch().
 * Does not compare handler for latter case.
 *
 * Returns 0 if match is found.
 */
static int
compare_sh(const void *_a, const void *_b)
{
	const struct ipfw_sopt_handler *a, *b;

	a = (const struct ipfw_sopt_handler *)_a;
	b = (const struct ipfw_sopt_handler *)_b;

	if (a->opcode < b->opcode)
		return (-1);
	else if (a->opcode > b->opcode)
		return (1);

	if (a->version < b->version)
		return (-1);
	else if (a->version > b->version)
		return (1);

	/* bsearch helper: a NULL key handler matches any handler pointer. */
	if (a->handler == NULL)
		return (0);

	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
		return (-1);
	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
		return (1);

	return (0);
}

/*
 * Finds sopt handler based on @code and @version.
 *
 * Returns pointer to handler or NULL.
 */
static struct ipfw_sopt_handler *
find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
{
	struct ipfw_sopt_handler *sh, h;

	memset(&h, 0, sizeof(h));
	h.opcode = code;
	h.version = version;
	h.handler = handler;

	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
	    ctl3_hsize, sizeof(h), compare_sh);

	return (sh);
}

/*
 * Looks up handler for @opcode/@version, bumps its reference count and
 * copies it into @psh so it can be used after the ctl3 lock is dropped.
 *
 * Returns 0 on success, EINVAL if no such handler is registered.
 */
static int
find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
{
	struct ipfw_sopt_handler *sh;

	CTL3_LOCK();
	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
		CTL3_UNLOCK();
		printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
		    opcode, version);
		return (EINVAL);
	}
	sh->refcnt++;
	ctl3_refct++;
	/* Copy handler data to requested buffer */
	*psh = *sh;
	CTL3_UNLOCK();

	return (0);
}

/*
 * Releases the handler reference previously taken by find_ref_sh().
 */
static void
find_unref_sh(struct ipfw_sopt_handler *psh)
{
	struct ipfw_sopt_handler *sh;

	CTL3_LOCK();
	sh = find_sh(psh->opcode, psh->version, NULL);
	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
	sh->refcnt--;
	ctl3_refct--;
	CTL3_UNLOCK();
}

/* Initializes the ctl3 lock and registers the built-in handlers. */
void
ipfw_init_sopt_handler()
{

	CTL3_LOCK_INIT();
	IPFW_ADD_SOPT_HANDLER(1, scodes);
}

/* Unregisters the built-in handlers and destroys the ctl3 lock. */
void
ipfw_destroy_sopt_handler()
{

	IPFW_DEL_SOPT_HANDLER(1, scodes);
	CTL3_LOCK_DESTROY();
}

/*
 * Adds one or more sockopt handlers to the global array.
 * Function may sleep.
 */
void
ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
{
	size_t sz;
	struct ipfw_sopt_handler *tmp;

	CTL3_LOCK();

	/*
	 * The lock has to be dropped around the sleeping malloc(); other
	 * registrations may race in meanwhile, so re-check the required
	 * size after re-acquiring the lock and retry if it grew.
	 */
	for (;;) {
		sz = ctl3_hsize + count;
		CTL3_UNLOCK();
		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
		CTL3_LOCK();
		if (ctl3_hsize + count <= sz)
			break;

		/* Retry */
		free(tmp, M_IPFW);
	}

	/* Merge old & new arrays */
	sz = ctl3_hsize + count;
	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
	qsort(tmp, sz, sizeof(*sh), compare_sh);
	/* Switch new and free old */
	if (ctl3_handlers != NULL)
		free(ctl3_handlers, M_IPFW);
	ctl3_handlers = tmp;
	ctl3_hsize = sz;
	ctl3_gencnt++;

	CTL3_UNLOCK();
}

/*
 * Removes one or more sockopt handlers from the global array.
3516 */ 3517int 3518ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3519{ 3520 size_t sz; 3521 struct ipfw_sopt_handler *tmp, *h; 3522 int i; 3523 3524 CTL3_LOCK(); 3525 3526 for (i = 0; i < count; i++) { 3527 tmp = &sh[i]; 3528 h = find_sh(tmp->opcode, tmp->version, tmp->handler); 3529 if (h == NULL) 3530 continue; 3531 3532 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h); 3533 memmove(h, h + 1, sz); 3534 ctl3_hsize--; 3535 } 3536 3537 if (ctl3_hsize == 0) { 3538 if (ctl3_handlers != NULL) 3539 free(ctl3_handlers, M_IPFW); 3540 ctl3_handlers = NULL; 3541 } 3542 3543 ctl3_gencnt++; 3544 3545 CTL3_UNLOCK(); 3546 3547 return (0); 3548} 3549 3550/* 3551 * Writes data accumulated in @sd to sockopt buffer. 3552 * Zeroes internal @sd buffer. 3553 */ 3554static int 3555ipfw_flush_sopt_data(struct sockopt_data *sd) 3556{ 3557 struct sockopt *sopt; 3558 int error; 3559 size_t sz; 3560 3561 sz = sd->koff; 3562 if (sz == 0) 3563 return (0); 3564 3565 sopt = sd->sopt; 3566 3567 if (sopt->sopt_dir == SOPT_GET) { 3568 error = copyout(sd->kbuf, sopt->sopt_val, sz); 3569 if (error != 0) 3570 return (error); 3571 } 3572 3573 memset(sd->kbuf, 0, sd->ksize); 3574 sd->ktotal += sz; 3575 sd->koff = 0; 3576 if (sd->ktotal + sd->ksize < sd->valsize) 3577 sd->kavail = sd->ksize; 3578 else 3579 sd->kavail = sd->valsize - sd->ktotal; 3580 3581 /* Update sopt buffer data */ 3582 sopt->sopt_valsize = sd->ktotal; 3583 sopt->sopt_val = sd->sopt_val + sd->ktotal; 3584 3585 return (0); 3586} 3587 3588/* 3589 * Ensures that @sd buffer has contiguous @neeeded number of 3590 * bytes. 3591 * 3592 * Returns pointer to requested space or NULL. 3593 */ 3594caddr_t 3595ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed) 3596{ 3597 int error; 3598 caddr_t addr; 3599 3600 if (sd->kavail < needed) { 3601 /* 3602 * Flush data and try another time. 
3603 */ 3604 error = ipfw_flush_sopt_data(sd); 3605 3606 if (sd->kavail < needed || error != 0) 3607 return (NULL); 3608 } 3609 3610 addr = sd->kbuf + sd->koff; 3611 sd->koff += needed; 3612 sd->kavail -= needed; 3613 return (addr); 3614} 3615 3616/* 3617 * Requests @needed contiguous bytes from @sd buffer. 3618 * Function is used to notify subsystem that we are 3619 * interesed in first @needed bytes (request header) 3620 * and the rest buffer can be safely zeroed. 3621 * 3622 * Returns pointer to requested space or NULL. 3623 */ 3624caddr_t 3625ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed) 3626{ 3627 caddr_t addr; 3628 3629 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL) 3630 return (NULL); 3631 3632 if (sd->kavail > 0) 3633 memset(sd->kbuf + sd->koff, 0, sd->kavail); 3634 3635 return (addr); 3636} 3637 3638/* 3639 * New sockopt handler. 3640 */ 3641int 3642ipfw_ctl3(struct sockopt *sopt) 3643{ 3644 int error, locked; 3645 size_t size, valsize; 3646 struct ip_fw_chain *chain; 3647 char xbuf[256]; 3648 struct sockopt_data sdata; 3649 struct ipfw_sopt_handler h; 3650 ip_fw3_opheader *op3 = NULL; 3651 3652 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW); 3653 if (error != 0) 3654 return (error); 3655 3656 if (sopt->sopt_name != IP_FW3) 3657 return (ipfw_ctl(sopt)); 3658 3659 chain = &V_layer3_chain; 3660 error = 0; 3661 3662 /* Save original valsize before it is altered via sooptcopyin() */ 3663 valsize = sopt->sopt_valsize; 3664 memset(&sdata, 0, sizeof(sdata)); 3665 /* Read op3 header first to determine actual operation */ 3666 op3 = (ip_fw3_opheader *)xbuf; 3667 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3)); 3668 if (error != 0) 3669 return (error); 3670 sopt->sopt_valsize = valsize; 3671 3672 /* 3673 * Find and reference command. 
3674 */ 3675 error = find_ref_sh(op3->opcode, op3->version, &h); 3676 if (error != 0) 3677 return (error); 3678 3679 /* 3680 * Disallow modifications in really-really secure mode, but still allow 3681 * the logging counters to be reset. 3682 */ 3683 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) { 3684 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3685 if (error != 0) { 3686 find_unref_sh(&h); 3687 return (error); 3688 } 3689 } 3690 3691 /* 3692 * Fill in sockopt_data structure that may be useful for 3693 * IP_FW3 get requests. 3694 */ 3695 locked = 0; 3696 if (valsize <= sizeof(xbuf)) { 3697 /* use on-stack buffer */ 3698 sdata.kbuf = xbuf; 3699 sdata.ksize = sizeof(xbuf); 3700 sdata.kavail = valsize; 3701 } else { 3702 3703 /* 3704 * Determine opcode type/buffer size: 3705 * allocate sliding-window buf for data export or 3706 * contiguous buffer for special ops. 3707 */ 3708 if ((h.dir & HDIR_SET) != 0) { 3709 /* Set request. Allocate contigous buffer. */ 3710 if (valsize > CTL3_LARGEBUF) { 3711 find_unref_sh(&h); 3712 return (EFBIG); 3713 } 3714 3715 size = valsize; 3716 } else { 3717 /* Get request. Allocate sliding window buffer */ 3718 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF; 3719 3720 if (size < valsize) { 3721 /* We have to wire user buffer */ 3722 error = vslock(sopt->sopt_val, valsize); 3723 if (error != 0) 3724 return (error); 3725 locked = 1; 3726 } 3727 } 3728 3729 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3730 sdata.ksize = size; 3731 sdata.kavail = size; 3732 } 3733 3734 sdata.sopt = sopt; 3735 sdata.sopt_val = sopt->sopt_val; 3736 sdata.valsize = valsize; 3737 3738 /* 3739 * Copy either all request (if valsize < bsize_max) 3740 * or first bsize_max bytes to guarantee most consumers 3741 * that all necessary data has been copied). 3742 * Anyway, copy not less than sizeof(ip_fw3_opheader). 
3743 */ 3744 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize, 3745 sizeof(ip_fw3_opheader))) != 0) 3746 return (error); 3747 op3 = (ip_fw3_opheader *)sdata.kbuf; 3748 3749 /* Finally, run handler */ 3750 error = h.handler(chain, op3, &sdata); 3751 find_unref_sh(&h); 3752 3753 /* Flush state and free buffers */ 3754 if (error == 0) 3755 error = ipfw_flush_sopt_data(&sdata); 3756 else 3757 ipfw_flush_sopt_data(&sdata); 3758 3759 if (locked != 0) 3760 vsunlock(sdata.sopt_val, valsize); 3761 3762 /* Restore original pointer and set number of bytes written */ 3763 sopt->sopt_val = sdata.sopt_val; 3764 sopt->sopt_valsize = sdata.ktotal; 3765 if (sdata.kbuf != xbuf) 3766 free(sdata.kbuf, M_TEMP); 3767 3768 return (error); 3769} 3770 3771/** 3772 * {set|get}sockopt parser. 3773 */ 3774int 3775ipfw_ctl(struct sockopt *sopt) 3776{ 3777#define RULE_MAXSIZE (512*sizeof(u_int32_t)) 3778 int error; 3779 size_t size, valsize; 3780 struct ip_fw *buf; 3781 struct ip_fw_rule0 *rule; 3782 struct ip_fw_chain *chain; 3783 u_int32_t rulenum[2]; 3784 uint32_t opt; 3785 struct rule_check_info ci; 3786 IPFW_RLOCK_TRACKER; 3787 3788 chain = &V_layer3_chain; 3789 error = 0; 3790 3791 /* Save original valsize before it is altered via sooptcopyin() */ 3792 valsize = sopt->sopt_valsize; 3793 opt = sopt->sopt_name; 3794 3795 /* 3796 * Disallow modifications in really-really secure mode, but still allow 3797 * the logging counters to be reset. 3798 */ 3799 if (opt == IP_FW_ADD || 3800 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) { 3801 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3802 if (error != 0) 3803 return (error); 3804 } 3805 3806 switch (opt) { 3807 case IP_FW_GET: 3808 /* 3809 * pass up a copy of the current rules. Static rules 3810 * come first (the last of which has number IPFW_DEFAULT_RULE), 3811 * followed by a possibly empty list of dynamic rule. 3812 * The last dynamic rule has NULL in the "next" field. 
3813 * 3814 * Note that the calculated size is used to bound the 3815 * amount of data returned to the user. The rule set may 3816 * change between calculating the size and returning the 3817 * data in which case we'll just return what fits. 3818 */ 3819 for (;;) { 3820 int len = 0, want; 3821 3822 size = chain->static_len; 3823 size += ipfw_dyn_len(); 3824 if (size >= sopt->sopt_valsize) 3825 break; 3826 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3827 IPFW_UH_RLOCK(chain); 3828 /* check again how much space we need */ 3829 want = chain->static_len + ipfw_dyn_len(); 3830 if (size >= want) 3831 len = ipfw_getrules(chain, buf, size); 3832 IPFW_UH_RUNLOCK(chain); 3833 if (size >= want) 3834 error = sooptcopyout(sopt, buf, len); 3835 free(buf, M_TEMP); 3836 if (size >= want) 3837 break; 3838 } 3839 break; 3840 3841 case IP_FW_FLUSH: 3842 /* locking is done within del_entry() */ 3843 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */ 3844 break; 3845 3846 case IP_FW_ADD: 3847 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK); 3848 error = sooptcopyin(sopt, rule, RULE_MAXSIZE, 3849 sizeof(struct ip_fw7) ); 3850 3851 memset(&ci, 0, sizeof(struct rule_check_info)); 3852 3853 /* 3854 * If the size of commands equals RULESIZE7 then we assume 3855 * a FreeBSD7.2 binary is talking to us (set is7=1). 3856 * is7 is persistent so the next 'ipfw list' command 3857 * will use this format. 3858 * NOTE: If wrong version is guessed (this can happen if 3859 * the first ipfw command is 'ipfw [pipe] list') 3860 * the ipfw binary may crash or loop infinitly... 
3861 */ 3862 size = sopt->sopt_valsize; 3863 if (size == RULESIZE7(rule)) { 3864 is7 = 1; 3865 error = convert_rule_to_8(rule); 3866 if (error) { 3867 free(rule, M_TEMP); 3868 return error; 3869 } 3870 size = RULESIZE(rule); 3871 } else 3872 is7 = 0; 3873 if (error == 0) 3874 error = check_ipfw_rule0(rule, size, &ci); 3875 if (error == 0) { 3876 /* locking is done within add_rule() */ 3877 struct ip_fw *krule; 3878 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule)); 3879 ci.urule = (caddr_t)rule; 3880 ci.krule = krule; 3881 import_rule0(&ci); 3882 error = commit_rules(chain, &ci, 1); 3883 if (error != 0) 3884 ipfw_free_rule(ci.krule); 3885 else if (sopt->sopt_dir == SOPT_GET) { 3886 if (is7) { 3887 error = convert_rule_to_7(rule); 3888 size = RULESIZE7(rule); 3889 if (error) { 3890 free(rule, M_TEMP); 3891 return error; 3892 } 3893 } 3894 error = sooptcopyout(sopt, rule, size); 3895 } 3896 } 3897 free(rule, M_TEMP); 3898 break; 3899 3900 case IP_FW_DEL: 3901 /* 3902 * IP_FW_DEL is used for deleting single rules or sets, 3903 * and (ab)used to atomically manipulate sets. Argument size 3904 * is used to distinguish between the two: 3905 * sizeof(u_int32_t) 3906 * delete single rule or set of rules, 3907 * or reassign rules (or sets) to a different set. 3908 * 2*sizeof(u_int32_t) 3909 * atomic disable/enable sets. 3910 * first u_int32_t contains sets to be disabled, 3911 * second u_int32_t contains sets to be enabled. 
3912 */ 3913 error = sooptcopyin(sopt, rulenum, 3914 2*sizeof(u_int32_t), sizeof(u_int32_t)); 3915 if (error) 3916 break; 3917 size = sopt->sopt_valsize; 3918 if (size == sizeof(u_int32_t) && rulenum[0] != 0) { 3919 /* delete or reassign, locking done in del_entry() */ 3920 error = del_entry(chain, rulenum[0]); 3921 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */ 3922 IPFW_UH_WLOCK(chain); 3923 V_set_disable = 3924 (V_set_disable | rulenum[0]) & ~rulenum[1] & 3925 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ 3926 IPFW_UH_WUNLOCK(chain); 3927 } else 3928 error = EINVAL; 3929 break; 3930 3931 case IP_FW_ZERO: 3932 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */ 3933 rulenum[0] = 0; 3934 if (sopt->sopt_val != 0) { 3935 error = sooptcopyin(sopt, rulenum, 3936 sizeof(u_int32_t), sizeof(u_int32_t)); 3937 if (error) 3938 break; 3939 } 3940 error = zero_entry(chain, rulenum[0], 3941 sopt->sopt_name == IP_FW_RESETLOG); 3942 break; 3943 3944 /*--- TABLE opcodes ---*/ 3945 case IP_FW_TABLE_ADD: 3946 case IP_FW_TABLE_DEL: 3947 { 3948 ipfw_table_entry ent; 3949 struct tentry_info tei; 3950 struct tid_info ti; 3951 struct table_value v; 3952 3953 error = sooptcopyin(sopt, &ent, 3954 sizeof(ent), sizeof(ent)); 3955 if (error) 3956 break; 3957 3958 memset(&tei, 0, sizeof(tei)); 3959 tei.paddr = &ent.addr; 3960 tei.subtype = AF_INET; 3961 tei.masklen = ent.masklen; 3962 ipfw_import_table_value_legacy(ent.value, &v); 3963 tei.pvalue = &v; 3964 memset(&ti, 0, sizeof(ti)); 3965 ti.uidx = ent.tbl; 3966 ti.type = IPFW_TABLE_CIDR; 3967 3968 error = (opt == IP_FW_TABLE_ADD) ? 
3969 add_table_entry(chain, &ti, &tei, 0, 1) : 3970 del_table_entry(chain, &ti, &tei, 0, 1); 3971 } 3972 break; 3973 3974 3975 case IP_FW_TABLE_FLUSH: 3976 { 3977 u_int16_t tbl; 3978 struct tid_info ti; 3979 3980 error = sooptcopyin(sopt, &tbl, 3981 sizeof(tbl), sizeof(tbl)); 3982 if (error) 3983 break; 3984 memset(&ti, 0, sizeof(ti)); 3985 ti.uidx = tbl; 3986 error = flush_table(chain, &ti); 3987 } 3988 break; 3989 3990 case IP_FW_TABLE_GETSIZE: 3991 { 3992 u_int32_t tbl, cnt; 3993 struct tid_info ti; 3994 3995 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl), 3996 sizeof(tbl)))) 3997 break; 3998 memset(&ti, 0, sizeof(ti)); 3999 ti.uidx = tbl; 4000 IPFW_RLOCK(chain); 4001 error = ipfw_count_table(chain, &ti, &cnt); 4002 IPFW_RUNLOCK(chain); 4003 if (error) 4004 break; 4005 error = sooptcopyout(sopt, &cnt, sizeof(cnt)); 4006 } 4007 break; 4008 4009 case IP_FW_TABLE_LIST: 4010 { 4011 ipfw_table *tbl; 4012 struct tid_info ti; 4013 4014 if (sopt->sopt_valsize < sizeof(*tbl)) { 4015 error = EINVAL; 4016 break; 4017 } 4018 size = sopt->sopt_valsize; 4019 tbl = malloc(size, M_TEMP, M_WAITOK); 4020 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl)); 4021 if (error) { 4022 free(tbl, M_TEMP); 4023 break; 4024 } 4025 tbl->size = (size - sizeof(*tbl)) / 4026 sizeof(ipfw_table_entry); 4027 memset(&ti, 0, sizeof(ti)); 4028 ti.uidx = tbl->tbl; 4029 IPFW_RLOCK(chain); 4030 error = ipfw_dump_table_legacy(chain, &ti, tbl); 4031 IPFW_RUNLOCK(chain); 4032 if (error) { 4033 free(tbl, M_TEMP); 4034 break; 4035 } 4036 error = sooptcopyout(sopt, tbl, size); 4037 free(tbl, M_TEMP); 4038 } 4039 break; 4040 4041 /*--- NAT operations are protected by the IPFW_LOCK ---*/ 4042 case IP_FW_NAT_CFG: 4043 if (IPFW_NAT_LOADED) 4044 error = ipfw_nat_cfg_ptr(sopt); 4045 else { 4046 printf("IP_FW_NAT_CFG: %s\n", 4047 "ipfw_nat not present, please load it"); 4048 error = EINVAL; 4049 } 4050 break; 4051 4052 case IP_FW_NAT_DEL: 4053 if (IPFW_NAT_LOADED) 4054 error = ipfw_nat_del_ptr(sopt); 4055 else { 
4056 printf("IP_FW_NAT_DEL: %s\n", 4057 "ipfw_nat not present, please load it"); 4058 error = EINVAL; 4059 } 4060 break; 4061 4062 case IP_FW_NAT_GET_CONFIG: 4063 if (IPFW_NAT_LOADED) 4064 error = ipfw_nat_get_cfg_ptr(sopt); 4065 else { 4066 printf("IP_FW_NAT_GET_CFG: %s\n", 4067 "ipfw_nat not present, please load it"); 4068 error = EINVAL; 4069 } 4070 break; 4071 4072 case IP_FW_NAT_GET_LOG: 4073 if (IPFW_NAT_LOADED) 4074 error = ipfw_nat_get_log_ptr(sopt); 4075 else { 4076 printf("IP_FW_NAT_GET_LOG: %s\n", 4077 "ipfw_nat not present, please load it"); 4078 error = EINVAL; 4079 } 4080 break; 4081 4082 default: 4083 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); 4084 error = EINVAL; 4085 } 4086 4087 return (error); 4088#undef RULE_MAXSIZE 4089} 4090#define RULE_MAXSIZE (256*sizeof(u_int32_t)) 4091 4092/* Functions to convert rules 7.2 <==> 8.0 */ 4093static int 4094convert_rule_to_7(struct ip_fw_rule0 *rule) 4095{ 4096 /* Used to modify original rule */ 4097 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule; 4098 /* copy of original rule, version 8 */ 4099 struct ip_fw_rule0 *tmp; 4100 4101 /* Used to copy commands */ 4102 ipfw_insn *ccmd, *dst; 4103 int ll = 0, ccmdlen = 0; 4104 4105 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 4106 if (tmp == NULL) { 4107 return 1; //XXX error 4108 } 4109 bcopy(rule, tmp, RULE_MAXSIZE); 4110 4111 /* Copy fields */ 4112 //rule7->_pad = tmp->_pad; 4113 rule7->set = tmp->set; 4114 rule7->rulenum = tmp->rulenum; 4115 rule7->cmd_len = tmp->cmd_len; 4116 rule7->act_ofs = tmp->act_ofs; 4117 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule; 4118 rule7->cmd_len = tmp->cmd_len; 4119 rule7->pcnt = tmp->pcnt; 4120 rule7->bcnt = tmp->bcnt; 4121 rule7->timestamp = tmp->timestamp; 4122 4123 /* Copy commands */ 4124 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ; 4125 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 4126 ccmdlen = F_LEN(ccmd); 4127 4128 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 
4129 4130 if (dst->opcode > O_NAT) 4131 /* O_REASS doesn't exists in 7.2 version, so 4132 * decrement opcode if it is after O_REASS 4133 */ 4134 dst->opcode--; 4135 4136 if (ccmdlen > ll) { 4137 printf("ipfw: opcode %d size truncated\n", 4138 ccmd->opcode); 4139 return EINVAL; 4140 } 4141 } 4142 free(tmp, M_TEMP); 4143 4144 return 0; 4145} 4146 4147static int 4148convert_rule_to_8(struct ip_fw_rule0 *rule) 4149{ 4150 /* Used to modify original rule */ 4151 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule; 4152 4153 /* Used to copy commands */ 4154 ipfw_insn *ccmd, *dst; 4155 int ll = 0, ccmdlen = 0; 4156 4157 /* Copy of original rule */ 4158 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 4159 if (tmp == NULL) { 4160 return 1; //XXX error 4161 } 4162 4163 bcopy(rule7, tmp, RULE_MAXSIZE); 4164 4165 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ; 4166 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 4167 ccmdlen = F_LEN(ccmd); 4168 4169 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 4170 4171 if (dst->opcode > O_NAT) 4172 /* O_REASS doesn't exists in 7.2 version, so 4173 * increment opcode if it is after O_REASS 4174 */ 4175 dst->opcode++; 4176 4177 if (ccmdlen > ll) { 4178 printf("ipfw: opcode %d size truncated\n", 4179 ccmd->opcode); 4180 return EINVAL; 4181 } 4182 } 4183 4184 rule->_pad = tmp->_pad; 4185 rule->set = tmp->set; 4186 rule->rulenum = tmp->rulenum; 4187 rule->cmd_len = tmp->cmd_len; 4188 rule->act_ofs = tmp->act_ofs; 4189 rule->next_rule = (struct ip_fw *)tmp->next_rule; 4190 rule->cmd_len = tmp->cmd_len; 4191 rule->id = 0; /* XXX see if is ok = 0 */ 4192 rule->pcnt = tmp->pcnt; 4193 rule->bcnt = tmp->bcnt; 4194 rule->timestamp = tmp->timestamp; 4195 4196 free (tmp, M_TEMP); 4197 return 0; 4198} 4199 4200/* 4201 * Named object api 4202 * 4203 */ 4204 4205void 4206ipfw_init_srv(struct ip_fw_chain *ch) 4207{ 4208 4209 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT); 4210 ch->srvstate = malloc(sizeof(void *) 
* IPFW_OBJECTS_DEFAULT, 4211 M_IPFW, M_WAITOK | M_ZERO); 4212} 4213 4214void 4215ipfw_destroy_srv(struct ip_fw_chain *ch) 4216{ 4217 4218 free(ch->srvstate, M_IPFW); 4219 ipfw_objhash_destroy(ch->srvmap); 4220} 4221 4222/* 4223 * Allocate new bitmask which can be used to enlarge/shrink 4224 * named instance index. 4225 */ 4226void 4227ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks) 4228{ 4229 size_t size; 4230 int max_blocks; 4231 u_long *idx_mask; 4232 4233 KASSERT((items % BLOCK_ITEMS) == 0, 4234 ("bitmask size needs to power of 2 and greater or equal to %zu", 4235 BLOCK_ITEMS)); 4236 4237 max_blocks = items / BLOCK_ITEMS; 4238 size = items / 8; 4239 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK); 4240 /* Mark all as free */ 4241 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS); 4242 *idx_mask &= ~(u_long)1; /* Skip index 0 */ 4243 4244 *idx = idx_mask; 4245 *pblocks = max_blocks; 4246} 4247 4248/* 4249 * Copy current bitmask index to new one. 4250 */ 4251void 4252ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks) 4253{ 4254 int old_blocks, new_blocks; 4255 u_long *old_idx, *new_idx; 4256 int i; 4257 4258 old_idx = ni->idx_mask; 4259 old_blocks = ni->max_blocks; 4260 new_idx = *idx; 4261 new_blocks = *blocks; 4262 4263 for (i = 0; i < IPFW_MAX_SETS; i++) { 4264 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i], 4265 old_blocks * sizeof(u_long)); 4266 } 4267} 4268 4269/* 4270 * Swaps current @ni index with new one. 
/*
 * Swaps current @ni index with new one.
 * The previous mask/block count are returned through @idx/@blocks so
 * the caller can free them with ipfw_objhash_bitmap_free().
 */
void
ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
{
	int old_blocks;
	u_long *old_idx;

	old_idx = ni->idx_mask;
	old_blocks = ni->max_blocks;

	ni->idx_mask = *idx;
	ni->max_blocks = *blocks;

	/* Save old values */
	*idx = old_idx;
	*blocks = old_blocks;
}

/*
 * Releases a bitmask previously obtained from bitmap_alloc/swap.
 */
void
ipfw_objhash_bitmap_free(void *idx, int blocks)
{

	free(idx, M_IPFW);
}

/*
 * Creates named hash instance.
 * Must be called without holding any locks.
 * Return pointer to new instance.
 */
struct namedobj_instance *
ipfw_objhash_create(uint32_t items)
{
	struct namedobj_instance *ni;
	int i;
	size_t size;

	/* One allocation: instance header + name buckets + value buckets */
	size = sizeof(struct namedobj_instance) +
	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;

	ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
	ni->nn_size = NAMEDOBJ_HASH_SIZE;
	ni->nv_size = NAMEDOBJ_HASH_SIZE;

	/* Bucket arrays live directly after the header */
	ni->names = (struct namedobjects_head *)(ni +1);
	ni->values = &ni->names[ni->nn_size];

	for (i = 0; i < ni->nn_size; i++)
		TAILQ_INIT(&ni->names[i]);

	for (i = 0; i < ni->nv_size; i++)
		TAILQ_INIT(&ni->values[i]);

	/* Set default hashing/comparison functions */
	ni->hash_f = objhash_hash_name;
	ni->cmp_f = objhash_cmp_name;

	/* Allocate bitmask separately due to possible resize */
	ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);

	return (ni);
}

/*
 * Frees the instance and its index bitmask.  Objects themselves are
 * owned by the callers and must already have been removed.
 */
void
ipfw_objhash_destroy(struct namedobj_instance *ni)
{

	free(ni->idx_mask, M_IPFW);
	free(ni, M_IPFW);
}

/*
 * Overrides the default name hash/compare callbacks for @ni.
 */
void
ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
    objhash_cmp_f *cmp_f)
{

	ni->hash_f = hash_f;
	ni->cmp_f = cmp_f;
}

/*
 * Default name hash: FNV-1 over the NUL-terminated name.
 * Note the set number is deliberately not mixed in, so objects with
 * the same name in different sets land in the same bucket.
 */
static uint32_t
objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
{

	return (fnv_32_str((const char *)name, FNV1_32_INIT));
}

/*
 * Default name comparator: match requires equal name AND equal set.
 * Returns 0 on match (strcmp-style).
 */
static int
objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
{

	if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
		return (0);

	return (1);
}

/*
 * Hash for the kernel-index buckets.
 * NOTE(review): modulus is nv_size - 1, not nv_size, so the last
 * bucket is only reachable via TAILQ inserts done before a resize --
 * presumably intentional to keep (nv_size - 1) odd; verify.
 */
static uint32_t
objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
{
	uint32_t v;

	v = val % (ni->nv_size - 1);

	return (v);
}

/*
 * Looks up an object by (@set, @name) using the instance callbacks.
 * Returns the object or NULL.
 */
struct named_object *
ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
{
	struct named_object *no;
	uint32_t hash;

	hash = ni->hash_f(ni, name, set) % ni->nn_size;

	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
		if (ni->cmp_f(no, name, set) == 0)
			return (no);
	}

	return (NULL);
}
/*
 * Find named object by @uidx.
 * Check @tlvs for valid data inside.
 *
 * Returns pointer to found TLV or NULL.
 */
ipfw_obj_ntlv *
ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
{
	ipfw_obj_ntlv *ntlv;
	uintptr_t pa, pe;
	int l;

	pa = (uintptr_t)tlvs;
	pe = pa + len;
	l = 0;
	for (; pa < pe; pa += l) {
		ntlv = (ipfw_obj_ntlv *)pa;
		l = ntlv->head.length;

		/* Userland data: any malformed length aborts the walk */
		if (l != sizeof(*ntlv))
			return (NULL);

		if (ntlv->idx != uidx)
			continue;
		/*
		 * When userland has specified zero TLV type, do
		 * not compare it with eltv. In some cases userland
		 * doesn't know what type should it have. Use only
		 * uidx and name for search named_object.
		 */
		if (ntlv->head.type != 0 &&
		    ntlv->head.type != (uint16_t)etlv)
			continue;

		if (ipfw_check_object_name_generic(ntlv->name) != 0)
			return (NULL);

		return (ntlv);
	}

	return (NULL);
}

/*
 * Finds object config based on either legacy index
 * or name in ntlv.
 * Note @ti structure contains unchecked data from userland.
 *
 * Returns 0 in success and fills in @pno with found config
 */
int
ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
    uint32_t etlv, struct named_object **pno)
{
	char *name;
	ipfw_obj_ntlv *ntlv;
	uint32_t set;

	if (ti->tlvs == NULL)
		return (EINVAL);

	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
	if (ntlv == NULL)
		return (EINVAL);
	name = ntlv->name;

	/*
	 * Use set provided by @ti instead of @ntlv one.
	 * This is needed due to different sets behavior
	 * controlled by V_fw_tables_sets.
	 */
	set = ti->set;
	*pno = ipfw_objhash_lookup_name(ni, set, name);
	if (*pno == NULL)
		return (ESRCH);
	return (0);
}

/*
 * Find named object by name, considering also its TLV type.
 */
struct named_object *
ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
    uint32_t type, const char *name)
{
	struct named_object *no;
	uint32_t hash;

	hash = ni->hash_f(ni, name, set) % ni->nn_size;

	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
		if (ni->cmp_f(no, name, set) == 0 &&
		    no->etlv == (uint16_t)type)
			return (no);
	}

	return (NULL);
}

/*
 * Looks up an object by kernel index @kidx in the value hash.
 * Returns the object or NULL.
 */
struct named_object *
ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
{
	struct named_object *no;
	uint32_t hash;

	hash = objhash_hash_idx(ni, kidx);

	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
		if (no->kidx == kidx)
			return (no);
	}

	return (NULL);
}

/*
 * Returns 1 if @a and @b have the same name and set, 0 otherwise.
 * Note: uses plain strcmp/set compare, not the instance cmp_f.
 */
int
ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
    struct named_object *b)
{

	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
		return (1);

	return (0);
}

/*
 * Inserts @no into both the name hash and the kidx hash.
 * Caller is responsible for name/kidx uniqueness.
 */
void
ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
{
	uint32_t hash;

	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);

	hash = objhash_hash_idx(ni, no->kidx);
	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);

	ni->count++;
}

/*
 * Removes @no from both hashes.  Does not free the object or its
 * index bit (see ipfw_objhash_free_idx()).
 */
void
ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
{
	uint32_t hash;

	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
	TAILQ_REMOVE(&ni->names[hash], no, nn_next);

	hash = objhash_hash_idx(ni, no->kidx);
	TAILQ_REMOVE(&ni->values[hash], no, nv_next);

	ni->count--;
}

/*
 * Returns the total number of objects in the instance.
 */
uint32_t
ipfw_objhash_count(struct namedobj_instance *ni)
{

	return (ni->count);
}
named_object *no; 4561 uint32_t count; 4562 int i; 4563 4564 count = 0; 4565 for (i = 0; i < ni->nn_size; i++) { 4566 TAILQ_FOREACH(no, &ni->names[i], nn_next) { 4567 if (no->etlv == type) 4568 count++; 4569 } 4570 } 4571 return (count); 4572} 4573 4574/* 4575 * Runs @func for each found named object. 4576 * It is safe to delete objects from callback 4577 */ 4578int 4579ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg) 4580{ 4581 struct named_object *no, *no_tmp; 4582 int i, ret; 4583 4584 for (i = 0; i < ni->nn_size; i++) { 4585 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) { 4586 ret = f(ni, no, arg); 4587 if (ret != 0) 4588 return (ret); 4589 } 4590 } 4591 return (0); 4592} 4593 4594/* 4595 * Runs @f for each found named object with type @type. 4596 * It is safe to delete objects from callback 4597 */ 4598int 4599ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f, 4600 void *arg, uint16_t type) 4601{ 4602 struct named_object *no, *no_tmp; 4603 int i, ret; 4604 4605 for (i = 0; i < ni->nn_size; i++) { 4606 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) { 4607 if (no->etlv != type) 4608 continue; 4609 ret = f(ni, no, arg); 4610 if (ret != 0) 4611 return (ret); 4612 } 4613 } 4614 return (0); 4615} 4616 4617/* 4618 * Removes index from given set. 4619 * Returns 0 on success. 4620 */ 4621int 4622ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx) 4623{ 4624 u_long *mask; 4625 int i, v; 4626 4627 i = idx / BLOCK_ITEMS; 4628 v = idx % BLOCK_ITEMS; 4629 4630 if (i >= ni->max_blocks) 4631 return (1); 4632 4633 mask = &ni->idx_mask[i]; 4634 4635 if ((*mask & ((u_long)1 << v)) != 0) 4636 return (1); 4637 4638 /* Mark as free */ 4639 *mask |= (u_long)1 << v; 4640 4641 /* Update free offset */ 4642 if (ni->free_off[0] > i) 4643 ni->free_off[0] = i; 4644 4645 return (0); 4646} 4647 4648/* 4649 * Allocate new index in given instance and stores in in @pidx. 4650 * Returns 0 on success. 
4651 */ 4652int 4653ipfw_objhash_alloc_idx(void *n, uint16_t *pidx) 4654{ 4655 struct namedobj_instance *ni; 4656 u_long *mask; 4657 int i, off, v; 4658 4659 ni = (struct namedobj_instance *)n; 4660 4661 off = ni->free_off[0]; 4662 mask = &ni->idx_mask[off]; 4663 4664 for (i = off; i < ni->max_blocks; i++, mask++) { 4665 if ((v = ffsl(*mask)) == 0) 4666 continue; 4667 4668 /* Mark as busy */ 4669 *mask &= ~ ((u_long)1 << (v - 1)); 4670 4671 ni->free_off[0] = i; 4672 4673 v = BLOCK_ITEMS * i + v - 1; 4674 4675 *pidx = v; 4676 return (0); 4677 } 4678 4679 return (1); 4680} 4681 4682/* end of file */ 4683