ip_fw_sockopt.c (290330) -> ip_fw_sockopt.c (290332)
1/*-
2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
3 * Copyright (c) 2014 Yandex LLC
4 * Copyright (c) 2014 Alexander V. Chernikov
5 *
6 * Supported by: Valeria Paoli
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/netpfil/ipfw/ip_fw_sockopt.c 290330 2015-11-03 10:21:53Z ae $");
31__FBSDID("$FreeBSD: head/sys/netpfil/ipfw/ip_fw_sockopt.c 290332 2015-11-03 10:29:46Z ae $");
32
33/*
34 * Control socket and rule management routines for ipfw.
35 * Control is currently implemented via IP_FW3 setsockopt() code.
36 */
37
38#include "opt_ipfw.h"
39#include "opt_inet.h"
40#ifndef INET
41#error IPFIREWALL requires INET.
42#endif /* INET */
43#include "opt_inet6.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h> /* struct m_tag used by nested headers */
49#include <sys/kernel.h>
50#include <sys/lock.h>
51#include <sys/priv.h>
52#include <sys/proc.h>
53#include <sys/rwlock.h>
54#include <sys/rmlock.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/fnv_hash.h>
60#include <net/if.h>
61#include <net/route.h>
62#include <net/vnet.h>
63#include <vm/vm.h>
64#include <vm/vm_extern.h>
65
66#include <netinet/in.h>
67#include <netinet/ip_var.h> /* hooks */
68#include <netinet/ip_fw.h>
69
70#include <netpfil/ipfw/ip_fw_private.h>
71#include <netpfil/ipfw/ip_fw_table.h>
72
73#ifdef MAC
74#include <security/mac/mac_framework.h>
75#endif
76
77static int ipfw_ctl(struct sockopt *sopt);
78static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
79 struct rule_check_info *ci);
80static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
81 struct rule_check_info *ci);
82static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
83 struct rule_check_info *ci);
84
85#define NAMEDOBJ_HASH_SIZE 32
86
87struct namedobj_instance {
88 struct namedobjects_head *names;
89 struct namedobjects_head *values;
90 uint32_t nn_size; /* names hash size */
91 uint32_t nv_size; /* number hash size */
92 u_long *idx_mask; /* used items bitmask */
93 uint32_t max_blocks; /* number of "long" blocks in bitmask */
94 uint32_t count; /* number of items */
95 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */
96 objhash_hash_f *hash_f;
97 objhash_cmp_f *cmp_f;
98};
99#define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */
100
101static uint32_t objhash_hash_name(struct namedobj_instance *ni, void *key,
102 uint32_t kopt);
103static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
104static int objhash_cmp_name(struct named_object *no, void *name, uint32_t set);
105
106MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
107
108static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
109 struct sockopt_data *sd);
110static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
111 struct sockopt_data *sd);
112static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
113 struct sockopt_data *sd);
114static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
115 struct sockopt_data *sd);
116static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
117 struct sockopt_data *sd);
118static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
119 struct sockopt_data *sd);
120static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
121 struct sockopt_data *sd);
122static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
123 struct sockopt_data *sd);
124
125/* ctl3 handler data */
126struct mtx ctl3_lock;
127#define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
128#define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock)
129#define CTL3_LOCK() mtx_lock(&ctl3_lock)
130#define CTL3_UNLOCK() mtx_unlock(&ctl3_lock)
131
132static struct ipfw_sopt_handler *ctl3_handlers;
133static size_t ctl3_hsize;
134static uint64_t ctl3_refct, ctl3_gencnt;
135#define CTL3_SMALLBUF 4096 /* small page-size write buffer */
136#define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */
137
138static int ipfw_flush_sopt_data(struct sockopt_data *sd);
139
140static struct ipfw_sopt_handler scodes[] = {
141 { IP_FW_XGET, 0, HDIR_GET, dump_config },
142 { IP_FW_XADD, 0, HDIR_BOTH, add_rules },
143 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules },
144 { IP_FW_XZERO, 0, HDIR_SET, clear_rules },
145 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules },
146 { IP_FW_XMOVE, 0, HDIR_SET, move_rules },
147 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets },
148 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets },
149 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets },
150 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes },
151 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects },
152};
153
154static int
155set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
156struct opcode_obj_rewrite *ipfw_find_op_rw(uint16_t opcode);
157static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
158 uint32_t *bmask);
159static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
160static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
161 struct sockopt_data *sd);
162
163/*
164 * Opcode object rewriter variables
165 */
166struct opcode_obj_rewrite *ctl3_rewriters;
167static size_t ctl3_rsize;
168
169/*
170 * static variables followed by global ones
171 */
172
173static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone);
174#define V_ipfw_cntr_zone VNET(ipfw_cntr_zone)
175
176void
177ipfw_init_counters()
178{
179
180 V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
181 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
182 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
183}
184
185void
186ipfw_destroy_counters()
187{
188
189 uma_zdestroy(V_ipfw_cntr_zone);
190}
191
192struct ip_fw *
193ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
194{
195 struct ip_fw *rule;
196
197 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
198 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
199
200 return (rule);
201}
202
203static void
204free_rule(struct ip_fw *rule)
205{
206
207 uma_zfree(V_ipfw_cntr_zone, rule->cntr);
208 free(rule, M_IPFW);
209}
210
211
212/*
213 * Find the smallest rule >= key, id.
214 * We could use bsearch but it is so simple that we code it directly
215 */
216int
217ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
218{
219 int i, lo, hi;
220 struct ip_fw *r;
221
222 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
223 i = (lo + hi) / 2;
224 r = chain->map[i];
225 if (r->rulenum < key)
226 lo = i + 1; /* continue from the next one */
227 else if (r->rulenum > key)
228 hi = i; /* this might be good */
229 else if (r->id < id)
230 lo = i + 1; /* continue from the next one */
231 else /* r->id >= id */
232 hi = i; /* this might be good */
233 };
234 return hi;
235}
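/*
 * Editor's note: the lookup above is a plain lower-bound binary search over
 * chain->map, which is kept sorted by (rulenum, id).  A minimal userland
 * sketch of the same invariant (struct rule_key is hypothetical, used only
 * for illustration):
 *
 *	struct rule_key { uint32_t rulenum; uint32_t id; };
 *
 *	static int
 *	find_rule(const struct rule_key *map, int n, uint32_t key, uint32_t id)
 *	{
 *		int i, lo, hi;
 *
 *		for (lo = 0, hi = n - 1; lo < hi;) {
 *			i = (lo + hi) / 2;
 *			if (map[i].rulenum < key ||
 *			    (map[i].rulenum == key && map[i].id < id))
 *				lo = i + 1;	// map[i] is too small, go right
 *			else
 *				hi = i;		// map[i] may be the answer
 *		}
 *		return (hi);	// smallest index with (rulenum, id) >= (key, id)
 *	}
 */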
236
237/*
238 * Builds skipto cache on rule set @map.
239 */
240static void
241update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
242{
243 int *smap, rulenum;
244 int i, mi;
245
246 IPFW_UH_WLOCK_ASSERT(chain);
247
248 mi = 0;
249 rulenum = map[mi]->rulenum;
250 smap = chain->idxmap_back;
251
252 if (smap == NULL)
253 return;
254
255 for (i = 0; i < 65536; i++) {
256 smap[i] = mi;
257 /* Use the same rule index until i < rulenum */
258 if (i != rulenum || i == 65535)
259 continue;
260 /* Find next rule with num > i */
261 rulenum = map[++mi]->rulenum;
262 while (rulenum == i)
263 rulenum = map[++mi]->rulenum;
264 }
265}
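/*
 * Editor's note: a worked example of the cache built above.  Given a
 * three-rule map (rulenum 100 at map[0], 200 at map[1], 65535 at map[2]),
 * the loop produces:
 *
 *	idxmap_back[0 .. 100]     = 0	// "skipto 100" starts at map[0]
 *	idxmap_back[101 .. 200]   = 1	// "skipto 150" starts at map[1]
 *	idxmap_back[201 .. 65535] = 2	// anything higher hits the default rule
 *
 * i.e. idxmap_back[i] is the index of the first rule with rulenum >= i, so
 * at run time a skipto becomes a single array lookup instead of a search.
 */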
266
267/*
268 * Swaps prepared (backup) index with current one.
269 */
270static void
271swap_skipto_cache(struct ip_fw_chain *chain)
272{
273 int *map;
274
275 IPFW_UH_WLOCK_ASSERT(chain);
276 IPFW_WLOCK_ASSERT(chain);
277
278 map = chain->idxmap;
279 chain->idxmap = chain->idxmap_back;
280 chain->idxmap_back = map;
281}
282
283/*
284 * Allocate and initialize skipto cache.
285 */
286void
287ipfw_init_skipto_cache(struct ip_fw_chain *chain)
288{
289 int *idxmap, *idxmap_back;
290
291 idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
292 M_WAITOK | M_ZERO);
293 idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
294 M_WAITOK | M_ZERO);
295
296 /*
297 * Note we may be called at any time after initialization,
298 * for example, on first skipto rule, so we need to
299 * provide valid chain->idxmap on return
300 */
301
302 IPFW_UH_WLOCK(chain);
303 if (chain->idxmap != NULL) {
304 IPFW_UH_WUNLOCK(chain);
305 free(idxmap, M_IPFW);
306 free(idxmap_back, M_IPFW);
307 return;
308 }
309
310 /* Set backup pointer first to permit building cache */
311 chain->idxmap_back = idxmap_back;
312 update_skipto_cache(chain, chain->map);
313 IPFW_WLOCK(chain);
314 /* It is now safe to set chain->idxmap ptr */
315 chain->idxmap = idxmap;
316 swap_skipto_cache(chain);
317 IPFW_WUNLOCK(chain);
318 IPFW_UH_WUNLOCK(chain);
319}
320
321/*
322 * Destroys skipto cache.
323 */
324void
325ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
326{
327
328 if (chain->idxmap != NULL)
329 free(chain->idxmap, M_IPFW);
 330	if (chain->idxmap_back != NULL)
331 free(chain->idxmap_back, M_IPFW);
332}
333
334
335/*
336 * allocate a new map, returns the chain locked. extra is the number
337 * of entries to add or delete.
338 */
339static struct ip_fw **
340get_map(struct ip_fw_chain *chain, int extra, int locked)
341{
342
343 for (;;) {
344 struct ip_fw **map;
345 int i, mflags;
346
347 mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
348
349 i = chain->n_rules + extra;
350 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
351 if (map == NULL) {
352 printf("%s: cannot allocate map\n", __FUNCTION__);
353 return NULL;
354 }
355 if (!locked)
356 IPFW_UH_WLOCK(chain);
357 if (i >= chain->n_rules + extra) /* good */
358 return map;
359 /* otherwise we lost the race, free and retry */
360 if (!locked)
361 IPFW_UH_WUNLOCK(chain);
362 free(map, M_IPFW);
363 }
364}
365
366/*
367 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
368 */
369static struct ip_fw **
370swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
371{
372 struct ip_fw **old_map;
373
374 IPFW_WLOCK(chain);
375 chain->id++;
376 chain->n_rules = new_len;
377 old_map = chain->map;
378 chain->map = new_map;
379 swap_skipto_cache(chain);
380 IPFW_WUNLOCK(chain);
381 return old_map;
382}
383
384
385static void
386export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
387{
388
389 cntr->size = sizeof(*cntr);
390
391 if (krule->cntr != NULL) {
392 cntr->pcnt = counter_u64_fetch(krule->cntr);
393 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
394 cntr->timestamp = krule->timestamp;
395 }
396 if (cntr->timestamp > 0)
397 cntr->timestamp += boottime.tv_sec;
398}
399
400static void
401export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
402{
403
404 if (krule->cntr != NULL) {
405 cntr->pcnt = counter_u64_fetch(krule->cntr);
406 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
407 cntr->timestamp = krule->timestamp;
408 }
409 if (cntr->timestamp > 0)
410 cntr->timestamp += boottime.tv_sec;
411}
412
413/*
 414 * Copies rule @urule from v1 userland format (current)
 415 * to kernel @krule.
416 * Assume @krule is zeroed.
417 */
418static void
419import_rule1(struct rule_check_info *ci)
420{
421 struct ip_fw_rule *urule;
422 struct ip_fw *krule;
423
424 urule = (struct ip_fw_rule *)ci->urule;
425 krule = (struct ip_fw *)ci->krule;
426
427 /* copy header */
428 krule->act_ofs = urule->act_ofs;
429 krule->cmd_len = urule->cmd_len;
430 krule->rulenum = urule->rulenum;
431 krule->set = urule->set;
432 krule->flags = urule->flags;
433
434 /* Save rulenum offset */
435 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
436
437 /* Copy opcodes */
438 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
439}
440
441/*
442 * Export rule into v1 format (Current).
443 * Layout:
444 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
445 * [ ip_fw_rule ] OR
446 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
447 * ]
448 * Assume @data is zeroed.
449 */
450static void
451export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
452{
453 struct ip_fw_bcounter *cntr;
454 struct ip_fw_rule *urule;
455 ipfw_obj_tlv *tlv;
456
457 /* Fill in TLV header */
458 tlv = (ipfw_obj_tlv *)data;
459 tlv->type = IPFW_TLV_RULE_ENT;
460 tlv->length = len;
461
462 if (rcntrs != 0) {
463 /* Copy counters */
464 cntr = (struct ip_fw_bcounter *)(tlv + 1);
465 urule = (struct ip_fw_rule *)(cntr + 1);
466 export_cntr1_base(krule, cntr);
467 } else
468 urule = (struct ip_fw_rule *)(tlv + 1);
469
470 /* copy header */
471 urule->act_ofs = krule->act_ofs;
472 urule->cmd_len = krule->cmd_len;
473 urule->rulenum = krule->rulenum;
474 urule->set = krule->set;
475 urule->flags = krule->flags;
476 urule->id = krule->id;
477
478 /* Copy opcodes */
479 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
480}
481
482
483/*
484 * Copies rule @urule from FreeBSD8 userland format (v0)
485 * to kernel @krule.
486 * Assume @krule is zeroed.
487 */
488static void
489import_rule0(struct rule_check_info *ci)
490{
491 struct ip_fw_rule0 *urule;
492 struct ip_fw *krule;
493 int cmdlen, l;
494 ipfw_insn *cmd;
495 ipfw_insn_limit *lcmd;
496 ipfw_insn_if *cmdif;
497
498 urule = (struct ip_fw_rule0 *)ci->urule;
499 krule = (struct ip_fw *)ci->krule;
500
501 /* copy header */
502 krule->act_ofs = urule->act_ofs;
503 krule->cmd_len = urule->cmd_len;
504 krule->rulenum = urule->rulenum;
505 krule->set = urule->set;
506 if ((urule->_pad & 1) != 0)
507 krule->flags |= IPFW_RULE_NOOPT;
508
509 /* Save rulenum offset */
510 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
511
512 /* Copy opcodes */
513 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
514
515 /*
516 * Alter opcodes:
 517 * 1) convert tablearg value from 65535 to 0
518 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
519 * 3) convert table number in iface opcodes to u16
520 */
521 l = krule->cmd_len;
522 cmd = krule->cmd;
523 cmdlen = 0;
524
525 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
526 cmdlen = F_LEN(cmd);
527
528 switch (cmd->opcode) {
529 /* Opcodes supporting tablearg */
530 case O_TAG:
531 case O_TAGGED:
532 case O_PIPE:
533 case O_QUEUE:
534 case O_DIVERT:
535 case O_TEE:
536 case O_SKIPTO:
537 case O_CALLRETURN:
538 case O_NETGRAPH:
539 case O_NGTEE:
540 case O_NAT:
541 if (cmd->arg1 == 65535)
542 cmd->arg1 = IP_FW_TARG;
543 break;
544 case O_SETFIB:
545 case O_SETDSCP:
546 if (cmd->arg1 == 65535)
547 cmd->arg1 = IP_FW_TARG;
548 else
549 cmd->arg1 |= 0x8000;
550 break;
551 case O_LIMIT:
552 lcmd = (ipfw_insn_limit *)cmd;
553 if (lcmd->conn_limit == 65535)
554 lcmd->conn_limit = IP_FW_TARG;
555 break;
556 /* Interface tables */
557 case O_XMIT:
558 case O_RECV:
559 case O_VIA:
560 /* Interface table, possibly */
561 cmdif = (ipfw_insn_if *)cmd;
562 if (cmdif->name[0] != '\1')
563 break;
564
565 cmdif->p.kidx = (uint16_t)cmdif->p.glob;
566 break;
567 }
568 }
569}
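/*
 * Editor's note: a short example of the opcode rewriting above, using the
 * conversions listed in the preceding comment (tablearg 65535 -> 0, i.e.
 * IP_FW_TARG, and 0x8000 as a marker bit for O_SETFIB/O_SETDSCP):
 *
 *	"skipto tablearg"	arrives as arg1 == 65535 -> stored as IP_FW_TARG
 *	"setfib 2"		arrives as arg1 == 2     -> stored as (2 | 0x8000)
 *	"setfib tablearg"	arrives as arg1 == 65535 -> stored as IP_FW_TARG
 *
 * export_rule0() below applies the exact inverse when dumping rules to an
 * old (v0) client.
 */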
570
571/*
572 * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
573 */
574static void
575export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
576{
577 int cmdlen, l;
578 ipfw_insn *cmd;
579 ipfw_insn_limit *lcmd;
580 ipfw_insn_if *cmdif;
581
582 /* copy header */
583 memset(urule, 0, len);
584 urule->act_ofs = krule->act_ofs;
585 urule->cmd_len = krule->cmd_len;
586 urule->rulenum = krule->rulenum;
587 urule->set = krule->set;
588 if ((krule->flags & IPFW_RULE_NOOPT) != 0)
589 urule->_pad |= 1;
590
591 /* Copy opcodes */
592 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
593
594 /* Export counters */
595 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
596
597 /*
598 * Alter opcodes:
 599 * 1) convert tablearg value from 0 to 65535
600 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
601 * 3) convert table number in iface opcodes to int
602 */
603 l = urule->cmd_len;
604 cmd = urule->cmd;
605 cmdlen = 0;
606
607 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
608 cmdlen = F_LEN(cmd);
609
610 switch (cmd->opcode) {
611 /* Opcodes supporting tablearg */
612 case O_TAG:
613 case O_TAGGED:
614 case O_PIPE:
615 case O_QUEUE:
616 case O_DIVERT:
617 case O_TEE:
618 case O_SKIPTO:
619 case O_CALLRETURN:
620 case O_NETGRAPH:
621 case O_NGTEE:
622 case O_NAT:
623 if (cmd->arg1 == IP_FW_TARG)
624 cmd->arg1 = 65535;
625 break;
626 case O_SETFIB:
627 case O_SETDSCP:
628 if (cmd->arg1 == IP_FW_TARG)
629 cmd->arg1 = 65535;
630 else
631 cmd->arg1 &= ~0x8000;
632 break;
633 case O_LIMIT:
634 lcmd = (ipfw_insn_limit *)cmd;
635 if (lcmd->conn_limit == IP_FW_TARG)
636 lcmd->conn_limit = 65535;
637 break;
638 /* Interface tables */
639 case O_XMIT:
640 case O_RECV:
641 case O_VIA:
642 /* Interface table, possibly */
643 cmdif = (ipfw_insn_if *)cmd;
644 if (cmdif->name[0] != '\1')
645 break;
646
647 cmdif->p.glob = cmdif->p.kidx;
648 break;
649 }
650 }
651}
652
653/*
654 * Add new rule(s) to the list possibly creating rule number for each.
655 * Update the rule_number in the input struct so the caller knows it as well.
656 * Must be called without IPFW_UH held
657 */
658static int
659commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
660{
661 int error, i, insert_before, tcount;
662 uint16_t rulenum, *pnum;
663 struct rule_check_info *ci;
664 struct ip_fw *krule;
665 struct ip_fw **map; /* the new array of pointers */
666
667 /* Check if we need to do table/obj index remap */
668 tcount = 0;
669 for (ci = rci, i = 0; i < count; ci++, i++) {
670 if (ci->object_opcodes == 0)
671 continue;
672
673 /*
674 * Rule has some object opcodes.
675 * We need to find (and create non-existing)
676 * kernel objects, and reference existing ones.
677 */
678 error = ipfw_rewrite_rule_uidx(chain, ci);
679 if (error != 0) {
680
681 /*
682 * rewrite failed, state for current rule
683 * has been reverted. Check if we need to
684 * revert more.
685 */
686 if (tcount > 0) {
687
688 /*
689 * We have some more table rules
690 * we need to rollback.
691 */
692
693 IPFW_UH_WLOCK(chain);
694 while (ci != rci) {
695 ci--;
696 if (ci->object_opcodes == 0)
697 continue;
698 unref_rule_objects(chain,ci->krule);
699
700 }
701 IPFW_UH_WUNLOCK(chain);
702
703 }
704
705 return (error);
706 }
707
708 tcount++;
709 }
710
711 /* get_map returns with IPFW_UH_WLOCK if successful */
712 map = get_map(chain, count, 0 /* not locked */);
713 if (map == NULL) {
714 if (tcount > 0) {
715 /* Unbind tables */
716 IPFW_UH_WLOCK(chain);
717 for (ci = rci, i = 0; i < count; ci++, i++) {
718 if (ci->object_opcodes == 0)
719 continue;
720
721 unref_rule_objects(chain, ci->krule);
722 }
723 IPFW_UH_WUNLOCK(chain);
724 }
725
726 return (ENOSPC);
727 }
728
729 if (V_autoinc_step < 1)
730 V_autoinc_step = 1;
731 else if (V_autoinc_step > 1000)
732 V_autoinc_step = 1000;
733
734 /* FIXME: Handle count > 1 */
735 ci = rci;
736 krule = ci->krule;
737 rulenum = krule->rulenum;
738
739 /* find the insertion point, we will insert before */
740 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
741 i = ipfw_find_rule(chain, insert_before, 0);
742 /* duplicate first part */
743 if (i > 0)
744 bcopy(chain->map, map, i * sizeof(struct ip_fw *));
745 map[i] = krule;
746 /* duplicate remaining part, we always have the default rule */
747 bcopy(chain->map + i, map + i + 1,
748 sizeof(struct ip_fw *) *(chain->n_rules - i));
749 if (rulenum == 0) {
750 /* Compute rule number and write it back */
751 rulenum = i > 0 ? map[i-1]->rulenum : 0;
752 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
753 rulenum += V_autoinc_step;
754 krule->rulenum = rulenum;
755 /* Save number to userland rule */
756 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
757 *pnum = rulenum;
758 }
759
760 krule->id = chain->id + 1;
761 update_skipto_cache(chain, map);
762 map = swap_map(chain, map, chain->n_rules + 1);
763 chain->static_len += RULEUSIZE0(krule);
764 IPFW_UH_WUNLOCK(chain);
765 if (map)
766 free(map, M_IPFW);
767 return (0);
768}
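/*
 * Editor's note: a small example of the auto-numbering above, assuming the
 * usual V_autoinc_step default of 100.  With an existing ruleset numbered
 * { 100, 200, 65535 }, adding a rule with rulenum == 0 inserts it just
 * before the default rule, takes the previous rule's number (200) and
 * assigns 200 + 100 = 300.  The computed number is also written back into
 * the userland buffer through ci->urule_numoff, so the caller learns it.
 */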
769
770/*
771 * Adds @rule to the list of rules to reap
772 */
773void
774ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
775 struct ip_fw *rule)
776{
777
778 IPFW_UH_WLOCK_ASSERT(chain);
779
780 /* Unlink rule from everywhere */
781 unref_rule_objects(chain, rule);
782
783 *((struct ip_fw **)rule) = *head;
784 *head = rule;
785}
786
787/*
788 * Reclaim storage associated with a list of rules. This is
789 * typically the list created using remove_rule.
790 * A NULL pointer on input is handled correctly.
791 */
792void
793ipfw_reap_rules(struct ip_fw *head)
794{
795 struct ip_fw *rule;
796
797 while ((rule = head) != NULL) {
798 head = *((struct ip_fw **)head);
799 free_rule(rule);
800 }
801}
802
803/*
804 * Rules to keep are
805 * (default || reserved || !match_set || !match_number)
806 * where
807 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
808 * // the default rule is always protected
809 *
810 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
811 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
812 *
813 * match_set ::= (cmd == 0 || rule->set == set)
814 * // set number is ignored for cmd == 0
815 *
816 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
817 * // number is ignored for cmd == 1 or n == 0
818 *
819 */
820int
821ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
822{
823
824 /* Don't match default rule for modification queries */
825 if (rule->rulenum == IPFW_DEFAULT_RULE &&
826 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
827 return (0);
828
829 /* Don't match rules in reserved set for flush requests */
830 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
831 return (0);
832
833 /* If we're filtering by set, don't match other sets */
834 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
835 return (0);
836
837 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
838 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
839 return (0);
840
841 return (1);
842}
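/*
 * Editor's note: a minimal usage sketch for the predicate above, e.g.
 * selecting rules 1000..2000 that belong to set 3 (values are purely
 * illustrative; field names are the ones used elsewhere in this file):
 *
 *	ipfw_range_tlv rt;
 *
 *	memset(&rt, 0, sizeof(rt));
 *	rt.flags = IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
 *	rt.start_rule = 1000;
 *	rt.end_rule = 2000;
 *	rt.set = 3;
 *	// ipfw_match_range(rule, &rt) now returns 1 only for rules whose
 *	// rulenum lies within [1000, 2000] and whose set is 3; the default
 *	// rule stays protected because IPFW_RCFLAG_DEFAULT is not set.
 */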
843
844/*
845 * Delete rules matching range @rt.
846 * Saves number of deleted rules in @ndel.
847 *
848 * Returns 0 on success.
849 */
850static int
851delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
852{
853 struct ip_fw *reap, *rule, **map;
854 int end, start;
855 int i, n, ndyn, ofs;
856
857 reap = NULL;
858 IPFW_UH_WLOCK(chain); /* arbitrate writers */
859
860 /*
861 * Stage 1: Determine range to inspect.
 862 * Range is half-open, e.g. [start, end).
863 */
864 start = 0;
865 end = chain->n_rules - 1;
866
867 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
868 start = ipfw_find_rule(chain, rt->start_rule, 0);
869
870 end = ipfw_find_rule(chain, rt->end_rule, 0);
871 if (rt->end_rule != IPFW_DEFAULT_RULE)
872 while (chain->map[end]->rulenum == rt->end_rule)
873 end++;
874 }
875
876 /* Allocate new map of the same size */
877 map = get_map(chain, 0, 1 /* locked */);
878 if (map == NULL) {
879 IPFW_UH_WUNLOCK(chain);
880 return (ENOMEM);
881 }
882
883 n = 0;
884 ndyn = 0;
885 ofs = start;
886 /* 1. bcopy the initial part of the map */
887 if (start > 0)
888 bcopy(chain->map, map, start * sizeof(struct ip_fw *));
889 /* 2. copy active rules between start and end */
890 for (i = start; i < end; i++) {
891 rule = chain->map[i];
892 if (ipfw_match_range(rule, rt) == 0) {
893 map[ofs++] = rule;
894 continue;
895 }
896
897 n++;
898 if (ipfw_is_dyn_rule(rule) != 0)
899 ndyn++;
900 }
901 /* 3. copy the final part of the map */
902 bcopy(chain->map + end, map + ofs,
903 (chain->n_rules - end) * sizeof(struct ip_fw *));
904 /* 4. recalculate skipto cache */
905 update_skipto_cache(chain, map);
906 /* 5. swap the maps (under UH_WLOCK + WHLOCK) */
907 map = swap_map(chain, map, chain->n_rules - n);
908 /* 6. Remove all dynamic states originated by deleted rules */
909 if (ndyn > 0)
910 ipfw_expire_dyn_rules(chain, rt);
911 /* 7. now remove the rules deleted from the old map */
912 for (i = start; i < end; i++) {
913 rule = map[i];
914 if (ipfw_match_range(rule, rt) == 0)
915 continue;
916 chain->static_len -= RULEUSIZE0(rule);
917 ipfw_reap_add(chain, &reap, rule);
918 }
919 IPFW_UH_WUNLOCK(chain);
920
921 ipfw_reap_rules(reap);
922 if (map != NULL)
923 free(map, M_IPFW);
924 *ndel = n;
925 return (0);
926}
927
928/*
 929 * Changes the set of each rule matching range @rt
 930 * to the new set given in @rt.
931 *
932 * Returns 0 on success.
933 */
934static int
935move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
936{
937 struct ip_fw *rule;
938 int i;
939
940 IPFW_UH_WLOCK(chain);
941
942 /*
 943 * Move rules with matching parameters to a new set.
944 * This one is much more complex. We have to ensure
945 * that all referenced tables (if any) are referenced
946 * by given rule subset only. Otherwise, we can't move
947 * them to new set and have to return error.
948 */
949 if (V_fw_tables_sets != 0) {
950 if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
951 IPFW_UH_WUNLOCK(chain);
952 return (EBUSY);
953 }
954 }
955
956 /* XXX: We have to do swap holding WLOCK */
957 for (i = 0; i < chain->n_rules; i++) {
958 rule = chain->map[i];
959 if (ipfw_match_range(rule, rt) == 0)
960 continue;
961 rule->set = rt->new_set;
962 }
963
964 IPFW_UH_WUNLOCK(chain);
965
966 return (0);
967}
968
969/*
970 * Clear counters for a specific rule.
971 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
972 * so we only care that rules do not disappear.
973 */
974static void
975clear_counters(struct ip_fw *rule, int log_only)
976{
977 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
978
979 if (log_only == 0)
980 IPFW_ZERO_RULE_COUNTER(rule);
981 if (l->o.opcode == O_LOG)
982 l->log_left = l->max_log;
983}
984
985/*
986 * Flushes rules counters and/or log values on matching range.
987 *
988 * Returns number of items cleared.
989 */
990static int
991clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
992{
993 struct ip_fw *rule;
994 int num;
995 int i;
996
997 num = 0;
998 rt->flags |= IPFW_RCFLAG_DEFAULT;
999
1000 IPFW_UH_WLOCK(chain); /* arbitrate writers */
1001 for (i = 0; i < chain->n_rules; i++) {
1002 rule = chain->map[i];
1003 if (ipfw_match_range(rule, rt) == 0)
1004 continue;
1005 clear_counters(rule, log_only);
1006 num++;
1007 }
1008 IPFW_UH_WUNLOCK(chain);
1009
1010 return (num);
1011}
1012
1013static int
1014check_range_tlv(ipfw_range_tlv *rt)
1015{
1016
1017 if (rt->head.length != sizeof(*rt))
1018 return (1);
1019 if (rt->start_rule > rt->end_rule)
1020 return (1);
1021 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1022 return (1);
1023
1024 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1025 return (1);
1026
1027 return (0);
1028}
1029
1030/*
1031 * Delete rules matching specified parameters
1032 * Data layout (v0)(current):
1033 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1034 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1035 *
1036 * Saves number of deleted rules in ipfw_range_tlv->new_set.
1037 *
1038 * Returns 0 on success.
1039 */
1040static int
1041del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1042 struct sockopt_data *sd)
1043{
1044 ipfw_range_header *rh;
1045 int error, ndel;
1046
1047 if (sd->valsize != sizeof(*rh))
1048 return (EINVAL);
1049
1050 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1051
1052 if (check_range_tlv(&rh->range) != 0)
1053 return (EINVAL);
1054
1055 ndel = 0;
1056 if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1057 return (error);
1058
1059 /* Save number of rules deleted */
1060 rh->range.new_set = ndel;
1061 return (0);
1062}
1063
1064/*
1065 * Move rules/sets matching specified parameters
1066 * Data layout (v0)(current):
1067 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1068 *
1069 * Returns 0 on success.
1070 */
1071static int
1072move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1073 struct sockopt_data *sd)
1074{
1075 ipfw_range_header *rh;
1076
1077 if (sd->valsize != sizeof(*rh))
1078 return (EINVAL);
1079
1080 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1081
1082 if (check_range_tlv(&rh->range) != 0)
1083 return (EINVAL);
1084
1085 return (move_range(chain, &rh->range));
1086}
1087
1088/*
1089 * Clear rule accounting data matching specified parameters
1090 * Data layout (v0)(current):
1091 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1092 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1093 *
1094 * Saves number of cleared rules in ipfw_range_tlv->new_set.
1095 *
1096 * Returns 0 on success.
1097 */
1098static int
1099clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1100 struct sockopt_data *sd)
1101{
1102 ipfw_range_header *rh;
1103 int log_only, num;
1104 char *msg;
1105
1106 if (sd->valsize != sizeof(*rh))
1107 return (EINVAL);
1108
1109 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1110
1111 if (check_range_tlv(&rh->range) != 0)
1112 return (EINVAL);
1113
1114 log_only = (op3->opcode == IP_FW_XRESETLOG);
1115
1116 num = clear_range(chain, &rh->range, log_only);
1117
1118 if (rh->range.flags & IPFW_RCFLAG_ALL)
1119 msg = log_only ? "All logging counts reset" :
1120 "Accounting cleared";
1121 else
1122 msg = log_only ? "logging count reset" : "cleared";
1123
1124 if (V_fw_verbose) {
1125 int lev = LOG_SECURITY | LOG_NOTICE;
1126 log(lev, "ipfw: %s.\n", msg);
1127 }
1128
1129 /* Save number of rules cleared */
1130 rh->range.new_set = num;
1131 return (0);
1132}
1133
1134static void
1135enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1136{
1137 uint32_t v_set;
1138
1139 IPFW_UH_WLOCK_ASSERT(chain);
1140
1141 /* Change enabled/disabled sets mask */
1142 v_set = (V_set_disable | rt->set) & ~rt->new_set;
1143 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1144 IPFW_WLOCK(chain);
1145 V_set_disable = v_set;
1146 IPFW_WUNLOCK(chain);
1147}
1148
1149static void
1150swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1151{
1152 struct ip_fw *rule;
1153 int i;
1154
1155 IPFW_UH_WLOCK_ASSERT(chain);
1156
1157 /* Swap or move two sets */
1158 for (i = 0; i < chain->n_rules - 1; i++) {
1159 rule = chain->map[i];
1160 if (rule->set == rt->set)
1161 rule->set = rt->new_set;
1162 else if (rule->set == rt->new_set && mv == 0)
1163 rule->set = rt->set;
1164 }
1165 if (V_fw_tables_sets != 0)
1166 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv);
1167}
1168
1169/*
1170 * Swaps or moves set
1171 * Data layout (v0)(current):
1172 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1173 *
1174 * Returns 0 on success.
1175 */
1176static int
1177manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1178 struct sockopt_data *sd)
1179{
1180 ipfw_range_header *rh;
1181
1182 if (sd->valsize != sizeof(*rh))
1183 return (EINVAL);
1184
1185 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1186
1187 if (rh->range.head.length != sizeof(ipfw_range_tlv))
1188 return (1);
1189
1190 IPFW_UH_WLOCK(chain);
1191 switch (op3->opcode) {
1192 case IP_FW_SET_SWAP:
1193 case IP_FW_SET_MOVE:
1194 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE);
1195 break;
1196 case IP_FW_SET_ENABLE:
1197 enable_sets(chain, &rh->range);
1198 break;
1199 }
1200 IPFW_UH_WUNLOCK(chain);
1201
1202 return (0);
1203}
1204
1205/**
1206 * Remove all rules with given number, or do set manipulation.
1207 * Assumes chain != NULL && *chain != NULL.
1208 *
1209 * The argument is an uint32_t. The low 16 bit are the rule or set number;
1210 * the next 8 bits are the new set; the top 8 bits indicate the command:
1211 *
1212 * 0 delete rules numbered "rulenum"
1213 * 1 delete rules in set "rulenum"
1214 * 2 move rules "rulenum" to set "new_set"
1215 * 3 move rules from set "rulenum" to set "new_set"
1216 * 4 swap sets "rulenum" and "new_set"
1217 * 5 delete rules "rulenum" and set "new_set"
1218 */
1219static int
1220del_entry(struct ip_fw_chain *chain, uint32_t arg)
1221{
1222 uint32_t num; /* rule number or old_set */
1223 uint8_t cmd, new_set;
1224 int do_del, ndel;
1225 int error = 0;
1226 ipfw_range_tlv rt;
1227
1228 num = arg & 0xffff;
1229 cmd = (arg >> 24) & 0xff;
1230 new_set = (arg >> 16) & 0xff;
1231
1232 if (cmd > 5 || new_set > RESVD_SET)
1233 return EINVAL;
1234 if (cmd == 0 || cmd == 2 || cmd == 5) {
1235 if (num >= IPFW_DEFAULT_RULE)
1236 return EINVAL;
1237 } else {
1238 if (num > RESVD_SET) /* old_set */
1239 return EINVAL;
1240 }
1241
1242 /* Convert old requests into new representation */
1243 memset(&rt, 0, sizeof(rt));
1244 rt.start_rule = num;
1245 rt.end_rule = num;
1246 rt.set = num;
1247 rt.new_set = new_set;
1248 do_del = 0;
1249
1250 switch (cmd) {
1251 case 0: /* delete rules numbered "rulenum" */
1252 if (num == 0)
1253 rt.flags |= IPFW_RCFLAG_ALL;
1254 else
1255 rt.flags |= IPFW_RCFLAG_RANGE;
1256 do_del = 1;
1257 break;
1258 case 1: /* delete rules in set "rulenum" */
1259 rt.flags |= IPFW_RCFLAG_SET;
1260 do_del = 1;
1261 break;
1262 case 5: /* delete rules "rulenum" and set "new_set" */
1263 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1264 rt.set = new_set;
1265 rt.new_set = 0;
1266 do_del = 1;
1267 break;
1268 case 2: /* move rules "rulenum" to set "new_set" */
1269 rt.flags |= IPFW_RCFLAG_RANGE;
1270 break;
1271 case 3: /* move rules from set "rulenum" to set "new_set" */
1272 IPFW_UH_WLOCK(chain);
1273 swap_sets(chain, &rt, 1);
1274 IPFW_UH_WUNLOCK(chain);
1275 return (0);
1276 case 4: /* swap sets "rulenum" and "new_set" */
1277 IPFW_UH_WLOCK(chain);
1278 swap_sets(chain, &rt, 0);
1279 IPFW_UH_WUNLOCK(chain);
1280 return (0);
1281 default:
1282 return (ENOTSUP);
1283 }
1284
1285 if (do_del != 0) {
1286 if ((error = delete_range(chain, &rt, &ndel)) != 0)
1287 return (error);
1288
1289 if (ndel == 0 && (cmd != 1 && num != 0))
1290 return (EINVAL);
1291
1292 return (0);
1293 }
1294
1295 return (move_range(chain, &rt));
1296}
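/*
 * Editor's note: a sketch of how the packed argument described in the
 * comment above is built by an old-style client, e.g. "move rules numbered
 * 1000 to set 5" (command 2; the values are only an illustration):
 *
 *	uint32_t arg;
 *
 *	arg  = 1000 & 0xffff;		// low 16 bits: rule (or old set) number
 *	arg |= (uint32_t)5 << 16;	// next 8 bits: new set
 *	arg |= (uint32_t)2 << 24;	// top 8 bits: command
 *	// del_entry(chain, arg) converts this into an ipfw_range_tlv with
 *	// IPFW_RCFLAG_RANGE set and hands it to move_range().
 */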
1297
1298/**
1299 * Reset some or all counters on firewall rules.
1300 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number,
1301 * the next 8 bits are the set number, the top 8 bits are the command:
 1302 * 0 work with rules from all sets;
1303 * 1 work with rules only from specified set.
1304 * Specified rule number is zero if we want to clear all entries.
1305 * log_only is 1 if we only want to reset logs, zero otherwise.
1306 */
1307static int
1308zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1309{
1310 struct ip_fw *rule;
1311 char *msg;
1312 int i;
1313
1314 uint16_t rulenum = arg & 0xffff;
1315 uint8_t set = (arg >> 16) & 0xff;
1316 uint8_t cmd = (arg >> 24) & 0xff;
1317
1318 if (cmd > 1)
1319 return (EINVAL);
1320 if (cmd == 1 && set > RESVD_SET)
1321 return (EINVAL);
1322
1323 IPFW_UH_RLOCK(chain);
1324 if (rulenum == 0) {
1325 V_norule_counter = 0;
1326 for (i = 0; i < chain->n_rules; i++) {
1327 rule = chain->map[i];
1328 /* Skip rules not in our set. */
1329 if (cmd == 1 && rule->set != set)
1330 continue;
1331 clear_counters(rule, log_only);
1332 }
1333 msg = log_only ? "All logging counts reset" :
1334 "Accounting cleared";
1335 } else {
1336 int cleared = 0;
1337 for (i = 0; i < chain->n_rules; i++) {
1338 rule = chain->map[i];
1339 if (rule->rulenum == rulenum) {
1340 if (cmd == 0 || rule->set == set)
1341 clear_counters(rule, log_only);
1342 cleared = 1;
1343 }
1344 if (rule->rulenum > rulenum)
1345 break;
1346 }
1347 if (!cleared) { /* we did not find any matching rules */
1348 IPFW_UH_RUNLOCK(chain);
1349 return (EINVAL);
1350 }
1351 msg = log_only ? "logging count reset" : "cleared";
1352 }
1353 IPFW_UH_RUNLOCK(chain);
1354
1355 if (V_fw_verbose) {
1356 int lev = LOG_SECURITY | LOG_NOTICE;
1357
1358 if (rulenum)
1359 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1360 else
1361 log(lev, "ipfw: %s.\n", msg);
1362 }
1363 return (0);
1364}
1365
1366
1367/*
1368 * Check rule head in FreeBSD11 format
1369 *
1370 */
1371static int
1372check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1373 struct rule_check_info *ci)
1374{
1375 int l;
1376
1377 if (size < sizeof(*rule)) {
1378 printf("ipfw: rule too short\n");
1379 return (EINVAL);
1380 }
1381
1382 /* Check for valid cmd_len */
1383 l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1384 if (l != size) {
1385 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1386 return (EINVAL);
1387 }
1388 if (rule->act_ofs >= rule->cmd_len) {
1389 printf("ipfw: bogus action offset (%u > %u)\n",
1390 rule->act_ofs, rule->cmd_len - 1);
1391 return (EINVAL);
1392 }
1393
1394 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1395 return (EINVAL);
1396
1397 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1398}
1399
1400/*
1401 * Check rule head in FreeBSD8 format
1402 *
1403 */
1404static int
1405check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1406 struct rule_check_info *ci)
1407{
1408 int l;
1409
1410 if (size < sizeof(*rule)) {
1411 printf("ipfw: rule too short\n");
1412 return (EINVAL);
1413 }
1414
1415 /* Check for valid cmd_len */
1416 l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1417 if (l != size) {
1418 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1419 return (EINVAL);
1420 }
1421 if (rule->act_ofs >= rule->cmd_len) {
1422 printf("ipfw: bogus action offset (%u > %u)\n",
1423 rule->act_ofs, rule->cmd_len - 1);
1424 return (EINVAL);
1425 }
1426
1427 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1428 return (EINVAL);
1429
1430 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1431}
1432
1433static int
1434check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1435{
1436 int cmdlen, l;
1437 int have_action;
1438
1439 have_action = 0;
1440
1441 /*
1442 * Now go for the individual checks. Very simple ones, basically only
1443 * instruction sizes.
1444 */
1445 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1446 cmdlen = F_LEN(cmd);
1447 if (cmdlen > l) {
1448 printf("ipfw: opcode %d size truncated\n",
1449 cmd->opcode);
1450 return EINVAL;
1451 }
1452 switch (cmd->opcode) {
1453 case O_PROBE_STATE:
1454 case O_KEEP_STATE:
1455 case O_PROTO:
1456 case O_IP_SRC_ME:
1457 case O_IP_DST_ME:
1458 case O_LAYER2:
1459 case O_IN:
1460 case O_FRAG:
1461 case O_DIVERTED:
1462 case O_IPOPT:
1463 case O_IPTOS:
1464 case O_IPPRECEDENCE:
1465 case O_IPVER:
1466 case O_SOCKARG:
1467 case O_TCPFLAGS:
1468 case O_TCPOPTS:
1469 case O_ESTAB:
1470 case O_VERREVPATH:
1471 case O_VERSRCREACH:
1472 case O_ANTISPOOF:
1473 case O_IPSEC:
1474#ifdef INET6
1475 case O_IP6_SRC_ME:
1476 case O_IP6_DST_ME:
1477 case O_EXT_HDR:
1478 case O_IP6:
1479#endif
1480 case O_IP4:
1481 case O_TAG:
1482 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1483 goto bad_size;
1484 break;
1485
1486 case O_FIB:
1487 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1488 goto bad_size;
1489 if (cmd->arg1 >= rt_numfibs) {
1490 printf("ipfw: invalid fib number %d\n",
1491 cmd->arg1);
1492 return EINVAL;
1493 }
1494 break;
1495
1496 case O_SETFIB:
1497 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1498 goto bad_size;
1499 if ((cmd->arg1 != IP_FW_TARG) &&
 1500 ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
1501 printf("ipfw: invalid fib number %d\n",
 1502 cmd->arg1 & 0x7FFF);
1503 return EINVAL;
1504 }
1505 goto check_action;
1506
1507 case O_UID:
1508 case O_GID:
1509 case O_JAIL:
1510 case O_IP_SRC:
1511 case O_IP_DST:
1512 case O_TCPSEQ:
1513 case O_TCPACK:
1514 case O_PROB:
1515 case O_ICMPTYPE:
1516 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1517 goto bad_size;
1518 break;
1519
1520 case O_LIMIT:
1521 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1522 goto bad_size;
1523 break;
1524
1525 case O_LOG:
1526 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1527 goto bad_size;
1528
1529 ((ipfw_insn_log *)cmd)->log_left =
1530 ((ipfw_insn_log *)cmd)->max_log;
1531
1532 break;
1533
1534 case O_IP_SRC_MASK:
1535 case O_IP_DST_MASK:
1536 /* only odd command lengths */
1537 if ((cmdlen & 1) == 0)
1538 goto bad_size;
1539 break;
1540
1541 case O_IP_SRC_SET:
1542 case O_IP_DST_SET:
1543 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1544 printf("ipfw: invalid set size %d\n",
1545 cmd->arg1);
1546 return EINVAL;
1547 }
1548 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1549 (cmd->arg1+31)/32 )
1550 goto bad_size;
1551 break;
1552
1553 case O_IP_SRC_LOOKUP:
1554 case O_IP_DST_LOOKUP:
1555 if (cmd->arg1 >= V_fw_tables_max) {
1556 printf("ipfw: invalid table number %d\n",
1557 cmd->arg1);
1558 return (EINVAL);
1559 }
1560 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1561 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1562 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1563 goto bad_size;
1564 ci->object_opcodes++;
1565 break;
1566 case O_IP_FLOW_LOOKUP:
1567 if (cmd->arg1 >= V_fw_tables_max) {
1568 printf("ipfw: invalid table number %d\n",
1569 cmd->arg1);
1570 return (EINVAL);
1571 }
1572 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1573 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1574 goto bad_size;
1575 ci->object_opcodes++;
1576 break;
1577 case O_MACADDR2:
1578 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1579 goto bad_size;
1580 break;
1581
1582 case O_NOP:
1583 case O_IPID:
1584 case O_IPTTL:
1585 case O_IPLEN:
1586 case O_TCPDATALEN:
1587 case O_TCPWIN:
1588 case O_TAGGED:
1589 if (cmdlen < 1 || cmdlen > 31)
1590 goto bad_size;
1591 break;
1592
1593 case O_DSCP:
1594 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1595 goto bad_size;
1596 break;
1597
1598 case O_MAC_TYPE:
1599 case O_IP_SRCPORT:
1600 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1601 if (cmdlen < 2 || cmdlen > 31)
1602 goto bad_size;
1603 break;
1604
1605 case O_RECV:
1606 case O_XMIT:
1607 case O_VIA:
1608 if (((ipfw_insn_if *)cmd)->name[0] == '\1')
1609 ci->object_opcodes++;
1610 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1611 goto bad_size;
1612 break;
1613
1614 case O_ALTQ:
1615 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1616 goto bad_size;
1617 break;
1618
1619 case O_PIPE:
1620 case O_QUEUE:
1621 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1622 goto bad_size;
1623 goto check_action;
1624
1625 case O_FORWARD_IP:
1626 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1627 goto bad_size;
1628 goto check_action;
1629#ifdef INET6
1630 case O_FORWARD_IP6:
1631 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1632 goto bad_size;
1633 goto check_action;
1634#endif /* INET6 */
1635
1636 case O_DIVERT:
1637 case O_TEE:
1638 if (ip_divert_ptr == NULL)
1639 return EINVAL;
1640 else
1641 goto check_size;
1642 case O_NETGRAPH:
1643 case O_NGTEE:
1644 if (ng_ipfw_input_p == NULL)
1645 return EINVAL;
1646 else
1647 goto check_size;
1648 case O_NAT:
1649 if (!IPFW_NAT_LOADED)
1650 return EINVAL;
1651 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1652 goto bad_size;
1653 goto check_action;
1654 case O_FORWARD_MAC: /* XXX not implemented yet */
1655 case O_CHECK_STATE:
1656 case O_COUNT:
1657 case O_ACCEPT:
1658 case O_DENY:
1659 case O_REJECT:
1660 case O_SETDSCP:
1661#ifdef INET6
1662 case O_UNREACH6:
1663#endif
1664 case O_SKIPTO:
1665 case O_REASS:
1666 case O_CALLRETURN:
1667check_size:
1668 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1669 goto bad_size;
1670check_action:
1671 if (have_action) {
1672 printf("ipfw: opcode %d, multiple actions"
1673 " not allowed\n",
1674 cmd->opcode);
1675 return (EINVAL);
1676 }
1677 have_action = 1;
1678 if (l != cmdlen) {
1679 printf("ipfw: opcode %d, action must be"
1680 " last opcode\n",
1681 cmd->opcode);
1682 return (EINVAL);
1683 }
1684 break;
1685#ifdef INET6
1686 case O_IP6_SRC:
1687 case O_IP6_DST:
1688 if (cmdlen != F_INSN_SIZE(struct in6_addr) +
1689 F_INSN_SIZE(ipfw_insn))
1690 goto bad_size;
1691 break;
1692
1693 case O_FLOW6ID:
1694 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1695 ((ipfw_insn_u32 *)cmd)->o.arg1)
1696 goto bad_size;
1697 break;
1698
1699 case O_IP6_SRC_MASK:
1700 case O_IP6_DST_MASK:
1701 if ( !(cmdlen & 1) || cmdlen > 127)
1702 goto bad_size;
1703 break;
1704 case O_ICMP6TYPE:
1705 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
1706 goto bad_size;
1707 break;
1708#endif
1709
1710 default:
1711 switch (cmd->opcode) {
1712#ifndef INET6
1713 case O_IP6_SRC_ME:
1714 case O_IP6_DST_ME:
1715 case O_EXT_HDR:
1716 case O_IP6:
1717 case O_UNREACH6:
1718 case O_IP6_SRC:
1719 case O_IP6_DST:
1720 case O_FLOW6ID:
1721 case O_IP6_SRC_MASK:
1722 case O_IP6_DST_MASK:
1723 case O_ICMP6TYPE:
1724 printf("ipfw: no IPv6 support in kernel\n");
1725 return (EPROTONOSUPPORT);
1726#endif
1727 default:
1728 printf("ipfw: opcode %d, unknown opcode\n",
1729 cmd->opcode);
1730 return (EINVAL);
1731 }
1732 }
1733 }
1734 if (have_action == 0) {
1735 printf("ipfw: missing action\n");
1736 return (EINVAL);
1737 }
1738 return 0;
1739
1740bad_size:
1741 printf("ipfw: opcode %d size %d wrong\n",
1742 cmd->opcode, cmdlen);
1743 return (EINVAL);
1744}
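/*
 * Editor's note: the smallest rule body that passes the checks above is a
 * single action opcode, e.g. "allow ip from any to any":
 *
 *	cmd[0].opcode = O_ACCEPT;	// the action, one 32-bit word long
 *	cmd[0].len    = 1;		// so F_LEN(&cmd[0]) == 1
 *	rule->cmd_len = 1;		// body length in 32-bit words
 *	rule->act_ofs = 0;		// action starts at cmd[0]
 *
 * Match opcodes such as O_PROTO would precede the action, with act_ofs
 * pointing at the action; the action must be the last opcode, which is
 * exactly what the "l != cmdlen" test above enforces.
 */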
1745
1746
1747/*
1748 * Translation of requests for compatibility with FreeBSD 7.2/8.
 1749 * A static variable tells us if we have an old client from userland,
1750 * and if necessary we translate requests and responses between the
1751 * two formats.
1752 */
1753static int is7 = 0;
1754
1755struct ip_fw7 {
1756 struct ip_fw7 *next; /* linked list of rules */
1757 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */
1758 /* 'next_rule' is used to pass up 'set_disable' status */
1759
1760 uint16_t act_ofs; /* offset of action in 32-bit units */
1761 uint16_t cmd_len; /* # of 32-bit words in cmd */
1762 uint16_t rulenum; /* rule number */
1763 uint8_t set; /* rule set (0..31) */
1764 // #define RESVD_SET 31 /* set for default and persistent rules */
1765 uint8_t _pad; /* padding */
1766 // uint32_t id; /* rule id, only in v.8 */
1767 /* These fields are present in all rules. */
1768 uint64_t pcnt; /* Packet counter */
1769 uint64_t bcnt; /* Byte counter */
1770 uint32_t timestamp; /* tv_sec of last match */
1771
1772 ipfw_insn cmd[1]; /* storage for commands */
1773};
1774
1775static int convert_rule_to_7(struct ip_fw_rule0 *rule);
1776static int convert_rule_to_8(struct ip_fw_rule0 *rule);
1777
1778#ifndef RULESIZE7
1779#define RULESIZE7(rule) (sizeof(struct ip_fw7) + \
1780 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
1781#endif
1782
1783
1784/*
1785 * Copy the static and dynamic rules to the supplied buffer
1786 * and return the amount of space actually used.
1787 * Must be run under IPFW_UH_RLOCK
1788 */
1789static size_t
1790ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
1791{
1792 char *bp = buf;
1793 char *ep = bp + space;
1794 struct ip_fw *rule;
1795 struct ip_fw_rule0 *dst;
1796 int error, i, l, warnflag;
1797 time_t boot_seconds;
1798
1799 warnflag = 0;
1800
1801 boot_seconds = boottime.tv_sec;
1802 for (i = 0; i < chain->n_rules; i++) {
1803 rule = chain->map[i];
1804
1805 if (is7) {
 1806 /* Convert rule to FreeBSD 7.2 format */
1807 l = RULESIZE7(rule);
1808 if (bp + l + sizeof(uint32_t) <= ep) {
1809 bcopy(rule, bp, l + sizeof(uint32_t));
1810 error = set_legacy_obj_kidx(chain,
1811 (struct ip_fw_rule0 *)bp);
1812 if (error != 0)
1813 return (0);
1814 error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
1815 if (error)
1816 return 0; /*XXX correct? */
1817 /*
1818 * XXX HACK. Store the disable mask in the "next"
1819 * pointer in a wild attempt to keep the ABI the same.
1820 * Why do we do this on EVERY rule?
1821 */
1822 bcopy(&V_set_disable,
1823 &(((struct ip_fw7 *)bp)->next_rule),
1824 sizeof(V_set_disable));
1825 if (((struct ip_fw7 *)bp)->timestamp)
1826 ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
1827 bp += l;
1828 }
1829 continue; /* go to next rule */
1830 }
1831
1832 l = RULEUSIZE0(rule);
1833 if (bp + l > ep) { /* should not happen */
1834 printf("overflow dumping static rules\n");
1835 break;
1836 }
1837 dst = (struct ip_fw_rule0 *)bp;
1838 export_rule0(rule, dst, l);
1839 error = set_legacy_obj_kidx(chain, dst);
1840
1841 /*
1842 * XXX HACK. Store the disable mask in the "next"
1843 * pointer in a wild attempt to keep the ABI the same.
1844 * Why do we do this on EVERY rule?
1845 *
1846 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
1847 * so we need to fail _after_ saving at least one mask.
1848 */
1849 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
1850 if (dst->timestamp)
1851 dst->timestamp += boot_seconds;
1852 bp += l;
1853
1854 if (error != 0) {
1855 if (error == 2) {
1856 /* Non-fatal table rewrite error. */
1857 warnflag = 1;
1858 continue;
1859 }
1860 printf("Stop on rule %d. Fail to convert table\n",
1861 rule->rulenum);
1862 break;
1863 }
1864 }
1865 if (warnflag != 0)
1866 printf("ipfw: process %s is using legacy interfaces,"
1867 " consider rebuilding\n", "");
1868 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
1869 return (bp - (char *)buf);
1870}
1871
1872
1873struct dump_args {
1874 uint32_t b; /* start rule */
1875 uint32_t e; /* end rule */
1876 uint32_t rcount; /* number of rules */
1877 uint32_t rsize; /* rules size */
1878 uint32_t tcount; /* number of tables */
1879 int rcounters; /* counters */
1880};
1881
1882void
1883ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
1884{
1885
1886 ntlv->head.type = no->etlv;
1887 ntlv->head.length = sizeof(*ntlv);
1888 ntlv->idx = no->kidx;
1889 strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
1890}
1891
1892/*
1893 * Export named object info in instance @ni, identified by @kidx
1894 * to ipfw_obj_ntlv. TLV is allocated from @sd space.
1895 *
1896 * Returns 0 on success.
1897 */
1898static int
1899export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
1900 struct sockopt_data *sd)
1901{
1902 struct named_object *no;
1903 ipfw_obj_ntlv *ntlv;
1904
1905 no = ipfw_objhash_lookup_kidx(ni, kidx);
1906 KASSERT(no != NULL, ("invalid object kernel index passed"));
1907
1908 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
1909 if (ntlv == NULL)
1910 return (ENOMEM);
1911
1912 ipfw_export_obj_ntlv(no, ntlv);
1913 return (0);
1914}
1915
1916/*
1917 * Dumps static rules with table TLVs in buffer @sd.
1918 *
1919 * Returns 0 on success.
1920 */
1921static int
1922dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
1923 uint32_t *bmask, struct sockopt_data *sd)
1924{
1925 int error;
1926 int i, l;
1927 uint32_t tcount;
1928 ipfw_obj_ctlv *ctlv;
1929 struct ip_fw *krule;
1930 struct namedobj_instance *ni;
1931 caddr_t dst;
1932
1933 /* Dump table names first (if any) */
1934 if (da->tcount > 0) {
1935 /* Header first */
1936 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1937 if (ctlv == NULL)
1938 return (ENOMEM);
1939 ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
1940 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
1941 sizeof(*ctlv);
1942 ctlv->count = da->tcount;
1943 ctlv->objsize = sizeof(ipfw_obj_ntlv);
1944 }
1945
1946 i = 0;
1947 tcount = da->tcount;
1948 ni = ipfw_get_table_objhash(chain);
1949 while (tcount > 0) {
1950 if ((bmask[i / 32] & (1 << (i % 32))) == 0) {
1951 i++;
1952 continue;
1953 }
1954
1955 /* Jump to shared named object bitmask */
1956 if (i >= IPFW_TABLES_MAX) {
1957 ni = CHAIN_TO_SRV(chain);
1958 i -= IPFW_TABLES_MAX;
1959 bmask += IPFW_TABLES_MAX / 32;
1960 }
1961
1962 if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
1963 return (error);
1964
1965 i++;
1966 tcount--;
1967 }
1968
1969 /* Dump rules */
1970 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1971 if (ctlv == NULL)
1972 return (ENOMEM);
1973 ctlv->head.type = IPFW_TLV_RULE_LIST;
1974 ctlv->head.length = da->rsize + sizeof(*ctlv);
1975 ctlv->count = da->rcount;
1976
1977 for (i = da->b; i < da->e; i++) {
1978 krule = chain->map[i];
1979
1980 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
1981 if (da->rcounters != 0)
1982 l += sizeof(struct ip_fw_bcounter);
1983 dst = (caddr_t)ipfw_get_sopt_space(sd, l);
1984 if (dst == NULL)
1985 return (ENOMEM);
1986
1987 export_rule1(krule, dst, l, da->rcounters);
1988 }
1989
1990 return (0);
1991}
1992
1993/*
1994 * Marks every object index used in @rule with bit in @bmask.
1995 * Used to generate bitmask of referenced tables/objects for given ruleset
1996 * or its part.
1997 *
1998 * Returns number of newly-referenced objects.
1999 */
2000static int
2001mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
2002 uint32_t *bmask)
2003{
2004 int cmdlen, l, count;
2005 ipfw_insn *cmd;
2006 uint16_t kidx;
2007 struct opcode_obj_rewrite *rw;
2008 int bidx;
2009 uint8_t subtype;
2010
2011 l = rule->cmd_len;
2012 cmd = rule->cmd;
2013 cmdlen = 0;
2014 count = 0;
2015 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2016 cmdlen = F_LEN(cmd);
2017
2018 rw = ipfw_find_op_rw(cmd->opcode);
2019 if (rw == NULL)
2020 continue;
2021
2022 if (rw->classifier(cmd, &kidx, &subtype) != 0)
2023 continue;
2024
2025 bidx = kidx / 32;
2026 /* Maintain separate bitmasks for table and non-table objects */
2027 if (rw->etlv != IPFW_TLV_TBL_NAME)
2028 bidx += IPFW_TABLES_MAX / 32;
2029
2030 if ((bmask[bidx] & (1 << (kidx % 32))) == 0)
2031 count++;
2032
2033 bmask[bidx] |= 1 << (kidx % 32);
2034 }
2035
2036 return (count);
2037}
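/*
 * Editor's note: a short example of the bitmask layout used above.  The
 * mask holds 2 * IPFW_TABLES_MAX bits (see the "2x space" allocation in
 * dump_config() below): a table referenced by kidx 5 sets bit 5 in the
 * first half, while a non-table named object with kidx 5 sets bit
 * (IPFW_TABLES_MAX + 5) in the second half, so the two kinds of kernel
 * indices never collide in the same bitmap.
 */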
2038
2039/*
2040 * Dumps requested objects data
2041 * Data layout (version 0)(current):
2042 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2043 * size = ipfw_cfg_lheader.size
2044 * Reply: [ ipfw_cfg_lheader
2045 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2046 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2047 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2048 * ] (optional)
2049 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2050 * ]
2051 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
 2052 * The rest (size, count) are set to zero and need to be ignored.
2053 *
2054 * Returns 0 on success.
2055 */
2056static int
2057dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2058 struct sockopt_data *sd)
2059{
2060 ipfw_cfg_lheader *hdr;
2061 struct ip_fw *rule;
2062 size_t sz, rnum;
2063 uint32_t hdr_flags;
2064 int error, i;
2065 struct dump_args da;
2066 uint32_t *bmask;
2067
2068 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2069 if (hdr == NULL)
2070 return (EINVAL);
2071
2072 error = 0;
2073 bmask = NULL;
2074 /* Allocate needed state. Note we allocate 2xspace mask, for table&srv */
2075 if (hdr->flags & IPFW_CFG_GET_STATIC)
2076 bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO);
2077
2078 IPFW_UH_RLOCK(chain);
2079
2080 /*
2081 * STAGE 1: Determine size/count for objects in range.
2082 * Prepare used tables bitmask.
2083 */
2084 sz = sizeof(ipfw_cfg_lheader);
2085 memset(&da, 0, sizeof(da));
2086
2087 da.b = 0;
2088 da.e = chain->n_rules;
2089
2090 if (hdr->end_rule != 0) {
2091 /* Handle custom range */
2092 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2093 rnum = IPFW_DEFAULT_RULE;
2094 da.b = ipfw_find_rule(chain, rnum, 0);
2095 rnum = hdr->end_rule;
2096 rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
2097 da.e = ipfw_find_rule(chain, rnum, 0) + 1;
2098 }
2099
2100 if (hdr->flags & IPFW_CFG_GET_STATIC) {
2101 for (i = da.b; i < da.e; i++) {
2102 rule = chain->map[i];
2103 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2104 da.rcount++;
2105 /* Update bitmask of used objects for given range */
2106 da.tcount += mark_object_kidx(chain, rule, bmask);
2107 }
2108 /* Add counters if requested */
2109 if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2110 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2111 da.rcounters = 1;
2112 }
2113
2114 if (da.tcount > 0)
2115 sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2116 sizeof(ipfw_obj_ctlv);
2117 sz += da.rsize + sizeof(ipfw_obj_ctlv);
2118 }
2119
2120 if (hdr->flags & IPFW_CFG_GET_STATES)
2121 sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
2122 sizeof(ipfw_obj_ctlv);
2123
2124
2125 /*
2126 * Fill header anyway.
2127 * Note we have to save header fields to stable storage
2128 * buffer inside @sd can be flushed after dumping rules
2129 */
2130 hdr->size = sz;
2131 hdr->set_mask = ~V_set_disable;
2132 hdr_flags = hdr->flags;
2133 hdr = NULL;
2134
2135 if (sd->valsize < sz) {
2136 error = ENOMEM;
2137 goto cleanup;
2138 }
2139
2140 /* STAGE2: Store actual data */
2141 if (hdr_flags & IPFW_CFG_GET_STATIC) {
2142 error = dump_static_rules(chain, &da, bmask, sd);
2143 if (error != 0)
2144 goto cleanup;
2145 }
2146
2147 if (hdr_flags & IPFW_CFG_GET_STATES)
2148 error = ipfw_dump_states(chain, sd);
2149
2150cleanup:
2151 IPFW_UH_RUNLOCK(chain);
2152
2153 if (bmask != NULL)
2154 free(bmask, M_TEMP);
2155
2156 return (error);
2157}
2158
32
33/*
34 * Control socket and rule management routines for ipfw.
35 * Control is currently implemented via IP_FW3 setsockopt() code.
36 */
37
38#include "opt_ipfw.h"
39#include "opt_inet.h"
40#ifndef INET
41#error IPFIREWALL requires INET.
42#endif /* INET */
43#include "opt_inet6.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h> /* struct m_tag used by nested headers */
49#include <sys/kernel.h>
50#include <sys/lock.h>
51#include <sys/priv.h>
52#include <sys/proc.h>
53#include <sys/rwlock.h>
54#include <sys/rmlock.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/fnv_hash.h>
60#include <net/if.h>
61#include <net/route.h>
62#include <net/vnet.h>
63#include <vm/vm.h>
64#include <vm/vm_extern.h>
65
66#include <netinet/in.h>
67#include <netinet/ip_var.h> /* hooks */
68#include <netinet/ip_fw.h>
69
70#include <netpfil/ipfw/ip_fw_private.h>
71#include <netpfil/ipfw/ip_fw_table.h>
72
73#ifdef MAC
74#include <security/mac/mac_framework.h>
75#endif
76
77static int ipfw_ctl(struct sockopt *sopt);
78static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
79 struct rule_check_info *ci);
80static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
81 struct rule_check_info *ci);
82static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
83 struct rule_check_info *ci);
84
85#define NAMEDOBJ_HASH_SIZE 32
86
87struct namedobj_instance {
88 struct namedobjects_head *names;
89 struct namedobjects_head *values;
90 uint32_t nn_size; /* names hash size */
91 uint32_t nv_size; /* number hash size */
92 u_long *idx_mask; /* used items bitmask */
93 uint32_t max_blocks; /* number of "long" blocks in bitmask */
94 uint32_t count; /* number of items */
95 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */
96 objhash_hash_f *hash_f;
97 objhash_cmp_f *cmp_f;
98};
99#define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */
100
101static uint32_t objhash_hash_name(struct namedobj_instance *ni, void *key,
102 uint32_t kopt);
103static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
104static int objhash_cmp_name(struct named_object *no, void *name, uint32_t set);
105
106MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
107
108static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
109 struct sockopt_data *sd);
110static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
111 struct sockopt_data *sd);
112static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
113 struct sockopt_data *sd);
114static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
115 struct sockopt_data *sd);
116static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
117 struct sockopt_data *sd);
118static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
119 struct sockopt_data *sd);
120static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
121 struct sockopt_data *sd);
122static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
123 struct sockopt_data *sd);
124
125/* ctl3 handler data */
126struct mtx ctl3_lock;
127#define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
128#define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock)
129#define CTL3_LOCK() mtx_lock(&ctl3_lock)
130#define CTL3_UNLOCK() mtx_unlock(&ctl3_lock)
131
132static struct ipfw_sopt_handler *ctl3_handlers;
133static size_t ctl3_hsize;
134static uint64_t ctl3_refct, ctl3_gencnt;
135#define CTL3_SMALLBUF 4096 /* small page-size write buffer */
136#define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */
137
138static int ipfw_flush_sopt_data(struct sockopt_data *sd);
139
140static struct ipfw_sopt_handler scodes[] = {
141 { IP_FW_XGET, 0, HDIR_GET, dump_config },
142 { IP_FW_XADD, 0, HDIR_BOTH, add_rules },
143 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules },
144 { IP_FW_XZERO, 0, HDIR_SET, clear_rules },
145 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules },
146 { IP_FW_XMOVE, 0, HDIR_SET, move_rules },
147 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets },
148 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets },
149 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets },
150 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes },
151 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects },
152};
153
154static int
155set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
156struct opcode_obj_rewrite *ipfw_find_op_rw(uint16_t opcode);
157static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
158 uint32_t *bmask);
159static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
160static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
161 struct sockopt_data *sd);
162
163/*
164 * Opcode object rewriter variables
165 */
166struct opcode_obj_rewrite *ctl3_rewriters;
167static size_t ctl3_rsize;
168
169/*
170 * static variables followed by global ones
171 */
172
173static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone);
174#define V_ipfw_cntr_zone VNET(ipfw_cntr_zone)
175
176void
177ipfw_init_counters()
178{
179
180 V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
181 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
182 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
183}
184
185void
186ipfw_destroy_counters()
187{
188
189 uma_zdestroy(V_ipfw_cntr_zone);
190}
191
192struct ip_fw *
193ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
194{
195 struct ip_fw *rule;
196
197 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
198 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
199
200 return (rule);
201}
202
203static void
204free_rule(struct ip_fw *rule)
205{
206
207 uma_zfree(V_ipfw_cntr_zone, rule->cntr);
208 free(rule, M_IPFW);
209}
210
211
212/*
213 * Find the smallest rule >= key, id.
214 * We could use bsearch but it is so simple that we code it directly
215 */
216int
217ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
218{
219 int i, lo, hi;
220 struct ip_fw *r;
221
222 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
223 i = (lo + hi) / 2;
224 r = chain->map[i];
225 if (r->rulenum < key)
226 lo = i + 1; /* continue from the next one */
227 else if (r->rulenum > key)
228 hi = i; /* this might be good */
229 else if (r->id < id)
230 lo = i + 1; /* continue from the next one */
231 else /* r->id >= id */
232 hi = i; /* this might be good */
233 };
234 return hi;
235}
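/*
 * Illustrative example (not from the original sources): how the
 * (rulenum, id) ordering above resolves lookups.  The rule numbers
 * and ids are made up.
 *
 *   chain->map[]:  idx    0      1      2      3
 *     rulenum            100    100    200   65535
 *     id                   7      9     12       0
 *
 *   ipfw_find_rule(chain, 100, 0) -> 0  (first rule >= (100, 0))
 *   ipfw_find_rule(chain, 100, 8) -> 1  (skips id 7, lands on id 9)
 *   ipfw_find_rule(chain, 150, 0) -> 2  (no rule 150, next is 200)
 */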
236
237/*
238 * Builds skipto cache on rule set @map.
239 */
240static void
241update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
242{
243 int *smap, rulenum;
244 int i, mi;
245
246 IPFW_UH_WLOCK_ASSERT(chain);
247
248 mi = 0;
249 rulenum = map[mi]->rulenum;
250 smap = chain->idxmap_back;
251
252 if (smap == NULL)
253 return;
254
255 for (i = 0; i < 65536; i++) {
256 smap[i] = mi;
257		/* Use the same rule index while i < rulenum */
258 if (i != rulenum || i == 65535)
259 continue;
260 /* Find next rule with num > i */
261 rulenum = map[++mi]->rulenum;
262 while (rulenum == i)
263 rulenum = map[++mi]->rulenum;
264 }
265}
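/*
 * Worked example (not from the original sources): with rules numbered
 * 100, 200 and 65535 (default), the loop above produces
 *
 *   idxmap_back[0..100]     = 0	(first rule with number >= i)
 *   idxmap_back[101..200]   = 1
 *   idxmap_back[201..65535] = 2
 *
 * so a skipto target N resolves to a starting map index without
 * scanning the ruleset.
 */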
266
267/*
268 * Swaps prepared (backup) index with current one.
269 */
270static void
271swap_skipto_cache(struct ip_fw_chain *chain)
272{
273 int *map;
274
275 IPFW_UH_WLOCK_ASSERT(chain);
276 IPFW_WLOCK_ASSERT(chain);
277
278 map = chain->idxmap;
279 chain->idxmap = chain->idxmap_back;
280 chain->idxmap_back = map;
281}
282
283/*
284 * Allocate and initialize skipto cache.
285 */
286void
287ipfw_init_skipto_cache(struct ip_fw_chain *chain)
288{
289 int *idxmap, *idxmap_back;
290
291 idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
292 M_WAITOK | M_ZERO);
293 idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
294 M_WAITOK | M_ZERO);
295
296 /*
297 * Note we may be called at any time after initialization,
298 * for example, on first skipto rule, so we need to
299 * provide valid chain->idxmap on return
300 */
301
302 IPFW_UH_WLOCK(chain);
303 if (chain->idxmap != NULL) {
304 IPFW_UH_WUNLOCK(chain);
305 free(idxmap, M_IPFW);
306 free(idxmap_back, M_IPFW);
307 return;
308 }
309
310 /* Set backup pointer first to permit building cache */
311 chain->idxmap_back = idxmap_back;
312 update_skipto_cache(chain, chain->map);
313 IPFW_WLOCK(chain);
314 /* It is now safe to set chain->idxmap ptr */
315 chain->idxmap = idxmap;
316 swap_skipto_cache(chain);
317 IPFW_WUNLOCK(chain);
318 IPFW_UH_WUNLOCK(chain);
319}
320
321/*
322 * Destroys skipto cache.
323 */
324void
325ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
326{
327
328 if (chain->idxmap != NULL)
329 free(chain->idxmap, M_IPFW);
330	if (chain->idxmap_back != NULL)
331 free(chain->idxmap_back, M_IPFW);
332}
333
334
335/*
336 * allocate a new map, returns the chain locked. extra is the number
337 * of entries to add or delete.
338 */
339static struct ip_fw **
340get_map(struct ip_fw_chain *chain, int extra, int locked)
341{
342
343 for (;;) {
344 struct ip_fw **map;
345 int i, mflags;
346
347 mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
348
349 i = chain->n_rules + extra;
350 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
351 if (map == NULL) {
352 printf("%s: cannot allocate map\n", __FUNCTION__);
353 return NULL;
354 }
355 if (!locked)
356 IPFW_UH_WLOCK(chain);
357 if (i >= chain->n_rules + extra) /* good */
358 return map;
359 /* otherwise we lost the race, free and retry */
360 if (!locked)
361 IPFW_UH_WUNLOCK(chain);
362 free(map, M_IPFW);
363 }
364}
365
366/*
367 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
368 */
369static struct ip_fw **
370swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
371{
372 struct ip_fw **old_map;
373
374 IPFW_WLOCK(chain);
375 chain->id++;
376 chain->n_rules = new_len;
377 old_map = chain->map;
378 chain->map = new_map;
379 swap_skipto_cache(chain);
380 IPFW_WUNLOCK(chain);
381 return old_map;
382}
383
384
385static void
386export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
387{
388
389 cntr->size = sizeof(*cntr);
390
391 if (krule->cntr != NULL) {
392 cntr->pcnt = counter_u64_fetch(krule->cntr);
393 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
394 cntr->timestamp = krule->timestamp;
395 }
396 if (cntr->timestamp > 0)
397 cntr->timestamp += boottime.tv_sec;
398}
399
400static void
401export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
402{
403
404 if (krule->cntr != NULL) {
405 cntr->pcnt = counter_u64_fetch(krule->cntr);
406 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
407 cntr->timestamp = krule->timestamp;
408 }
409 if (cntr->timestamp > 0)
410 cntr->timestamp += boottime.tv_sec;
411}
412
413/*
414 * Copies rule @urule from v1 userland format (current)
415 * to kernel @krule.
416 * Assume @krule is zeroed.
417 */
418static void
419import_rule1(struct rule_check_info *ci)
420{
421 struct ip_fw_rule *urule;
422 struct ip_fw *krule;
423
424 urule = (struct ip_fw_rule *)ci->urule;
425 krule = (struct ip_fw *)ci->krule;
426
427 /* copy header */
428 krule->act_ofs = urule->act_ofs;
429 krule->cmd_len = urule->cmd_len;
430 krule->rulenum = urule->rulenum;
431 krule->set = urule->set;
432 krule->flags = urule->flags;
433
434 /* Save rulenum offset */
435 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
436
437 /* Copy opcodes */
438 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
439}
440
441/*
442 * Export rule into v1 format (Current).
443 * Layout:
444 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
445 * [ ip_fw_rule ] OR
446 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
447 * ]
448 * Assume @data is zeroed.
449 */
450static void
451export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
452{
453 struct ip_fw_bcounter *cntr;
454 struct ip_fw_rule *urule;
455 ipfw_obj_tlv *tlv;
456
457 /* Fill in TLV header */
458 tlv = (ipfw_obj_tlv *)data;
459 tlv->type = IPFW_TLV_RULE_ENT;
460 tlv->length = len;
461
462 if (rcntrs != 0) {
463 /* Copy counters */
464 cntr = (struct ip_fw_bcounter *)(tlv + 1);
465 urule = (struct ip_fw_rule *)(cntr + 1);
466 export_cntr1_base(krule, cntr);
467 } else
468 urule = (struct ip_fw_rule *)(tlv + 1);
469
470 /* copy header */
471 urule->act_ofs = krule->act_ofs;
472 urule->cmd_len = krule->cmd_len;
473 urule->rulenum = krule->rulenum;
474 urule->set = krule->set;
475 urule->flags = krule->flags;
476 urule->id = krule->id;
477
478 /* Copy opcodes */
479 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
480}
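/*
 * A minimal userland-side sketch (not part of this kernel file) of how the
 * record layout documented above export_rule1() can be walked.  The buffer
 * is assumed to hold the rule records produced by dump_static_rules().
 */
#if 0
static void
walk_rule_records(const char *buf, size_t len, int have_counters)
{
	const ipfw_obj_tlv *tlv;
	const struct ip_fw_rule *r;

	while (len >= sizeof(*tlv)) {
		tlv = (const ipfw_obj_tlv *)buf;
		if (tlv->type != IPFW_TLV_RULE_ENT ||
		    tlv->length < sizeof(*tlv) || tlv->length > len)
			break;
		/* Rule body follows the TLV, after an optional bcounter. */
		r = (const struct ip_fw_rule *)((const char *)(tlv + 1) +
		    (have_counters ? sizeof(struct ip_fw_bcounter) : 0));
		printf("rule %u: %u opcode words\n", r->rulenum, r->cmd_len);
		buf += tlv->length;
		len -= tlv->length;
	}
}
#endif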
481
482
483/*
484 * Copies rule @urule from FreeBSD8 userland format (v0)
485 * to kernel @krule.
486 * Assume @krule is zeroed.
487 */
488static void
489import_rule0(struct rule_check_info *ci)
490{
491 struct ip_fw_rule0 *urule;
492 struct ip_fw *krule;
493 int cmdlen, l;
494 ipfw_insn *cmd;
495 ipfw_insn_limit *lcmd;
496 ipfw_insn_if *cmdif;
497
498 urule = (struct ip_fw_rule0 *)ci->urule;
499 krule = (struct ip_fw *)ci->krule;
500
501 /* copy header */
502 krule->act_ofs = urule->act_ofs;
503 krule->cmd_len = urule->cmd_len;
504 krule->rulenum = urule->rulenum;
505 krule->set = urule->set;
506 if ((urule->_pad & 1) != 0)
507 krule->flags |= IPFW_RULE_NOOPT;
508
509 /* Save rulenum offset */
510 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
511
512 /* Copy opcodes */
513 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
514
515 /*
516 * Alter opcodes:
517	 * 1) convert tablearg value from 65535 to 0
518 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
519 * 3) convert table number in iface opcodes to u16
520 */
521 l = krule->cmd_len;
522 cmd = krule->cmd;
523 cmdlen = 0;
524
525 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
526 cmdlen = F_LEN(cmd);
527
528 switch (cmd->opcode) {
529 /* Opcodes supporting tablearg */
530 case O_TAG:
531 case O_TAGGED:
532 case O_PIPE:
533 case O_QUEUE:
534 case O_DIVERT:
535 case O_TEE:
536 case O_SKIPTO:
537 case O_CALLRETURN:
538 case O_NETGRAPH:
539 case O_NGTEE:
540 case O_NAT:
541 if (cmd->arg1 == 65535)
542 cmd->arg1 = IP_FW_TARG;
543 break;
544 case O_SETFIB:
545 case O_SETDSCP:
546 if (cmd->arg1 == 65535)
547 cmd->arg1 = IP_FW_TARG;
548 else
549 cmd->arg1 |= 0x8000;
550 break;
551 case O_LIMIT:
552 lcmd = (ipfw_insn_limit *)cmd;
553 if (lcmd->conn_limit == 65535)
554 lcmd->conn_limit = IP_FW_TARG;
555 break;
556 /* Interface tables */
557 case O_XMIT:
558 case O_RECV:
559 case O_VIA:
560 /* Interface table, possibly */
561 cmdif = (ipfw_insn_if *)cmd;
562 if (cmdif->name[0] != '\1')
563 break;
564
565 cmdif->p.kidx = (uint16_t)cmdif->p.glob;
566 break;
567 }
568 }
569}
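/*
 * Worked example (not from the original sources) of the v0 conversions
 * listed above, for a hypothetical "skipto tablearg" / "setfib 2" pair:
 *
 *   v0 (FreeBSD8) input              kernel form after import_rule0()
 *   O_SKIPTO, arg1 == 65535    --->  O_SKIPTO, arg1 == IP_FW_TARG
 *   O_SETFIB, arg1 == 2        --->  O_SETFIB, arg1 == (2 | 0x8000)
 *   O_SETFIB, arg1 == 65535    --->  O_SETFIB, arg1 == IP_FW_TARG
 */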
570
571/*
572 * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
573 */
574static void
575export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
576{
577 int cmdlen, l;
578 ipfw_insn *cmd;
579 ipfw_insn_limit *lcmd;
580 ipfw_insn_if *cmdif;
581
582 /* copy header */
583 memset(urule, 0, len);
584 urule->act_ofs = krule->act_ofs;
585 urule->cmd_len = krule->cmd_len;
586 urule->rulenum = krule->rulenum;
587 urule->set = krule->set;
588 if ((krule->flags & IPFW_RULE_NOOPT) != 0)
589 urule->_pad |= 1;
590
591 /* Copy opcodes */
592 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
593
594 /* Export counters */
595 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
596
597 /*
598 * Alter opcodes:
599	 * 1) convert tablearg value from 0 to 65535
600 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
601 * 3) convert table number in iface opcodes to int
602 */
603 l = urule->cmd_len;
604 cmd = urule->cmd;
605 cmdlen = 0;
606
607 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
608 cmdlen = F_LEN(cmd);
609
610 switch (cmd->opcode) {
611 /* Opcodes supporting tablearg */
612 case O_TAG:
613 case O_TAGGED:
614 case O_PIPE:
615 case O_QUEUE:
616 case O_DIVERT:
617 case O_TEE:
618 case O_SKIPTO:
619 case O_CALLRETURN:
620 case O_NETGRAPH:
621 case O_NGTEE:
622 case O_NAT:
623 if (cmd->arg1 == IP_FW_TARG)
624 cmd->arg1 = 65535;
625 break;
626 case O_SETFIB:
627 case O_SETDSCP:
628 if (cmd->arg1 == IP_FW_TARG)
629 cmd->arg1 = 65535;
630 else
631 cmd->arg1 &= ~0x8000;
632 break;
633 case O_LIMIT:
634 lcmd = (ipfw_insn_limit *)cmd;
635 if (lcmd->conn_limit == IP_FW_TARG)
636 lcmd->conn_limit = 65535;
637 break;
638 /* Interface tables */
639 case O_XMIT:
640 case O_RECV:
641 case O_VIA:
642 /* Interface table, possibly */
643 cmdif = (ipfw_insn_if *)cmd;
644 if (cmdif->name[0] != '\1')
645 break;
646
647 cmdif->p.glob = cmdif->p.kidx;
648 break;
649 }
650 }
651}
652
653/*
654 * Add new rule(s) to the list possibly creating rule number for each.
655 * Update the rule_number in the input struct so the caller knows it as well.
656 * Must be called without IPFW_UH held
657 */
658static int
659commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
660{
661 int error, i, insert_before, tcount;
662 uint16_t rulenum, *pnum;
663 struct rule_check_info *ci;
664 struct ip_fw *krule;
665 struct ip_fw **map; /* the new array of pointers */
666
667 /* Check if we need to do table/obj index remap */
668 tcount = 0;
669 for (ci = rci, i = 0; i < count; ci++, i++) {
670 if (ci->object_opcodes == 0)
671 continue;
672
673 /*
674 * Rule has some object opcodes.
675 * We need to find (and create non-existing)
676 * kernel objects, and reference existing ones.
677 */
678 error = ipfw_rewrite_rule_uidx(chain, ci);
679 if (error != 0) {
680
681 /*
682 * rewrite failed, state for current rule
683 * has been reverted. Check if we need to
684 * revert more.
685 */
686 if (tcount > 0) {
687
688 /*
689 * We have some more table rules
690 * we need to rollback.
691 */
692
693 IPFW_UH_WLOCK(chain);
694 while (ci != rci) {
695 ci--;
696 if (ci->object_opcodes == 0)
697 continue;
698 unref_rule_objects(chain,ci->krule);
699
700 }
701 IPFW_UH_WUNLOCK(chain);
702
703 }
704
705 return (error);
706 }
707
708 tcount++;
709 }
710
711 /* get_map returns with IPFW_UH_WLOCK if successful */
712 map = get_map(chain, count, 0 /* not locked */);
713 if (map == NULL) {
714 if (tcount > 0) {
715 /* Unbind tables */
716 IPFW_UH_WLOCK(chain);
717 for (ci = rci, i = 0; i < count; ci++, i++) {
718 if (ci->object_opcodes == 0)
719 continue;
720
721 unref_rule_objects(chain, ci->krule);
722 }
723 IPFW_UH_WUNLOCK(chain);
724 }
725
726 return (ENOSPC);
727 }
728
729 if (V_autoinc_step < 1)
730 V_autoinc_step = 1;
731 else if (V_autoinc_step > 1000)
732 V_autoinc_step = 1000;
733
734 /* FIXME: Handle count > 1 */
735 ci = rci;
736 krule = ci->krule;
737 rulenum = krule->rulenum;
738
739 /* find the insertion point, we will insert before */
740 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
741 i = ipfw_find_rule(chain, insert_before, 0);
742 /* duplicate first part */
743 if (i > 0)
744 bcopy(chain->map, map, i * sizeof(struct ip_fw *));
745 map[i] = krule;
746 /* duplicate remaining part, we always have the default rule */
747 bcopy(chain->map + i, map + i + 1,
748 sizeof(struct ip_fw *) *(chain->n_rules - i));
749 if (rulenum == 0) {
750 /* Compute rule number and write it back */
751 rulenum = i > 0 ? map[i-1]->rulenum : 0;
752 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
753 rulenum += V_autoinc_step;
754 krule->rulenum = rulenum;
755 /* Save number to userland rule */
756 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
757 *pnum = rulenum;
758 }
759
760 krule->id = chain->id + 1;
761 update_skipto_cache(chain, map);
762 map = swap_map(chain, map, chain->n_rules + 1);
763 chain->static_len += RULEUSIZE0(krule);
764 IPFW_UH_WUNLOCK(chain);
765 if (map)
766 free(map, M_IPFW);
767 return (0);
768}
769
770/*
771 * Adds @rule to the list of rules to reap
772 */
773void
774ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
775 struct ip_fw *rule)
776{
777
778 IPFW_UH_WLOCK_ASSERT(chain);
779
780 /* Unlink rule from everywhere */
781 unref_rule_objects(chain, rule);
782
783 *((struct ip_fw **)rule) = *head;
784 *head = rule;
785}
786
787/*
788 * Reclaim storage associated with a list of rules. This is
789 * typically the list created using remove_rule.
790 * A NULL pointer on input is handled correctly.
791 */
792void
793ipfw_reap_rules(struct ip_fw *head)
794{
795 struct ip_fw *rule;
796
797 while ((rule = head) != NULL) {
798 head = *((struct ip_fw **)head);
799 free_rule(rule);
800 }
801}
802
803/*
804 * Rules to keep are
805 * (default || reserved || !match_set || !match_number)
806 * where
807 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
808 * // the default rule is always protected
809 *
810 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
811 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
812 *
813 * match_set ::= (cmd == 0 || rule->set == set)
814 * // set number is ignored for cmd == 0
815 *
816 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
817 * // number is ignored for cmd == 1 or n == 0
818 *
819 */
820int
821ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
822{
823
824 /* Don't match default rule for modification queries */
825 if (rule->rulenum == IPFW_DEFAULT_RULE &&
826 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
827 return (0);
828
829 /* Don't match rules in reserved set for flush requests */
830 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
831 return (0);
832
833 /* If we're filtering by set, don't match other sets */
834 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
835 return (0);
836
837 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
838 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
839 return (0);
840
841 return (1);
842}
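/*
 * Illustrative examples (not from the original sources) of the keep/match
 * logic documented above; the flag combinations are approximate:
 *
 *   delete of rule 100: rt = { flags: RCFLAG_RANGE, start = end = 100 }
 *     rule 100, set 3          -> match (deleted)
 *     rule 100, set RESVD_SET  -> match (RESVD_SET only shields a flush)
 *     rule 65535 (default)     -> no match (RCFLAG_DEFAULT not set)
 *
 *   flush: rt = { flags: RCFLAG_ALL }
 *     rule 200, set 0          -> match (deleted)
 *     rule 300, set RESVD_SET  -> no match (reserved set is kept)
 */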
843
844/*
845 * Delete rules matching range @rt.
846 * Saves number of deleted rules in @ndel.
847 *
848 * Returns 0 on success.
849 */
850static int
851delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
852{
853 struct ip_fw *reap, *rule, **map;
854 int end, start;
855 int i, n, ndyn, ofs;
856
857 reap = NULL;
858 IPFW_UH_WLOCK(chain); /* arbitrate writers */
859
860 /*
861 * Stage 1: Determine range to inspect.
862	 * Range is half-open, i.e. [start, end).
863 */
864 start = 0;
865 end = chain->n_rules - 1;
866
867 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
868 start = ipfw_find_rule(chain, rt->start_rule, 0);
869
870 end = ipfw_find_rule(chain, rt->end_rule, 0);
871 if (rt->end_rule != IPFW_DEFAULT_RULE)
872 while (chain->map[end]->rulenum == rt->end_rule)
873 end++;
874 }
875
876 /* Allocate new map of the same size */
877 map = get_map(chain, 0, 1 /* locked */);
878 if (map == NULL) {
879 IPFW_UH_WUNLOCK(chain);
880 return (ENOMEM);
881 }
882
883 n = 0;
884 ndyn = 0;
885 ofs = start;
886 /* 1. bcopy the initial part of the map */
887 if (start > 0)
888 bcopy(chain->map, map, start * sizeof(struct ip_fw *));
889 /* 2. copy active rules between start and end */
890 for (i = start; i < end; i++) {
891 rule = chain->map[i];
892 if (ipfw_match_range(rule, rt) == 0) {
893 map[ofs++] = rule;
894 continue;
895 }
896
897 n++;
898 if (ipfw_is_dyn_rule(rule) != 0)
899 ndyn++;
900 }
901 /* 3. copy the final part of the map */
902 bcopy(chain->map + end, map + ofs,
903 (chain->n_rules - end) * sizeof(struct ip_fw *));
904 /* 4. recalculate skipto cache */
905 update_skipto_cache(chain, map);
906 /* 5. swap the maps (under UH_WLOCK + WHLOCK) */
907 map = swap_map(chain, map, chain->n_rules - n);
908 /* 6. Remove all dynamic states originated by deleted rules */
909 if (ndyn > 0)
910 ipfw_expire_dyn_rules(chain, rt);
911 /* 7. now remove the rules deleted from the old map */
912 for (i = start; i < end; i++) {
913 rule = map[i];
914 if (ipfw_match_range(rule, rt) == 0)
915 continue;
916 chain->static_len -= RULEUSIZE0(rule);
917 ipfw_reap_add(chain, &reap, rule);
918 }
919 IPFW_UH_WUNLOCK(chain);
920
921 ipfw_reap_rules(reap);
922 if (map != NULL)
923 free(map, M_IPFW);
924 *ndel = n;
925 return (0);
926}
927
928/*
929 * Moves every rule matching range @rt
930 * into the set @rt->new_set.
931 *
932 * Returns 0 on success.
933 */
934static int
935move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
936{
937 struct ip_fw *rule;
938 int i;
939
940 IPFW_UH_WLOCK(chain);
941
942 /*
943	 * Move rules with matching parameters to a new set.
944 * This one is much more complex. We have to ensure
945 * that all referenced tables (if any) are referenced
946 * by given rule subset only. Otherwise, we can't move
947 * them to new set and have to return error.
948 */
949 if (V_fw_tables_sets != 0) {
950 if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
951 IPFW_UH_WUNLOCK(chain);
952 return (EBUSY);
953 }
954 }
955
956 /* XXX: We have to do swap holding WLOCK */
957 for (i = 0; i < chain->n_rules; i++) {
958 rule = chain->map[i];
959 if (ipfw_match_range(rule, rt) == 0)
960 continue;
961 rule->set = rt->new_set;
962 }
963
964 IPFW_UH_WUNLOCK(chain);
965
966 return (0);
967}
968
969/*
970 * Clear counters for a specific rule.
971 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
972 * so we only care that rules do not disappear.
973 */
974static void
975clear_counters(struct ip_fw *rule, int log_only)
976{
977 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
978
979 if (log_only == 0)
980 IPFW_ZERO_RULE_COUNTER(rule);
981 if (l->o.opcode == O_LOG)
982 l->log_left = l->max_log;
983}
984
985/*
986 * Flushes rules counters and/or log values on matching range.
987 *
988 * Returns number of items cleared.
989 */
990static int
991clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
992{
993 struct ip_fw *rule;
994 int num;
995 int i;
996
997 num = 0;
998 rt->flags |= IPFW_RCFLAG_DEFAULT;
999
1000 IPFW_UH_WLOCK(chain); /* arbitrate writers */
1001 for (i = 0; i < chain->n_rules; i++) {
1002 rule = chain->map[i];
1003 if (ipfw_match_range(rule, rt) == 0)
1004 continue;
1005 clear_counters(rule, log_only);
1006 num++;
1007 }
1008 IPFW_UH_WUNLOCK(chain);
1009
1010 return (num);
1011}
1012
1013static int
1014check_range_tlv(ipfw_range_tlv *rt)
1015{
1016
1017 if (rt->head.length != sizeof(*rt))
1018 return (1);
1019 if (rt->start_rule > rt->end_rule)
1020 return (1);
1021 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1022 return (1);
1023
1024 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1025 return (1);
1026
1027 return (0);
1028}
1029
1030/*
1031 * Delete rules matching specified parameters
1032 * Data layout (v0)(current):
1033 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1034 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1035 *
1036 * Saves number of deleted rules in ipfw_range_tlv->new_set.
1037 *
1038 * Returns 0 on success.
1039 */
1040static int
1041del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1042 struct sockopt_data *sd)
1043{
1044 ipfw_range_header *rh;
1045 int error, ndel;
1046
1047 if (sd->valsize != sizeof(*rh))
1048 return (EINVAL);
1049
1050 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1051
1052 if (check_range_tlv(&rh->range) != 0)
1053 return (EINVAL);
1054
1055 ndel = 0;
1056 if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1057 return (error);
1058
1059 /* Save number of rules deleted */
1060 rh->range.new_set = ndel;
1061 return (0);
1062}
1063
1064/*
1065 * Move rules/sets matching specified parameters
1066 * Data layout (v0)(current):
1067 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1068 *
1069 * Returns 0 on success.
1070 */
1071static int
1072move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1073 struct sockopt_data *sd)
1074{
1075 ipfw_range_header *rh;
1076
1077 if (sd->valsize != sizeof(*rh))
1078 return (EINVAL);
1079
1080 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1081
1082 if (check_range_tlv(&rh->range) != 0)
1083 return (EINVAL);
1084
1085 return (move_range(chain, &rh->range));
1086}
1087
1088/*
1089 * Clear rule accounting data matching specified parameters
1090 * Data layout (v0)(current):
1091 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1092 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1093 *
1094 * Saves number of cleared rules in ipfw_range_tlv->new_set.
1095 *
1096 * Returns 0 on success.
1097 */
1098static int
1099clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1100 struct sockopt_data *sd)
1101{
1102 ipfw_range_header *rh;
1103 int log_only, num;
1104 char *msg;
1105
1106 if (sd->valsize != sizeof(*rh))
1107 return (EINVAL);
1108
1109 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1110
1111 if (check_range_tlv(&rh->range) != 0)
1112 return (EINVAL);
1113
1114 log_only = (op3->opcode == IP_FW_XRESETLOG);
1115
1116 num = clear_range(chain, &rh->range, log_only);
1117
1118 if (rh->range.flags & IPFW_RCFLAG_ALL)
1119 msg = log_only ? "All logging counts reset" :
1120 "Accounting cleared";
1121 else
1122 msg = log_only ? "logging count reset" : "cleared";
1123
1124 if (V_fw_verbose) {
1125 int lev = LOG_SECURITY | LOG_NOTICE;
1126 log(lev, "ipfw: %s.\n", msg);
1127 }
1128
1129 /* Save number of rules cleared */
1130 rh->range.new_set = num;
1131 return (0);
1132}
1133
1134static void
1135enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1136{
1137 uint32_t v_set;
1138
1139 IPFW_UH_WLOCK_ASSERT(chain);
1140
1141 /* Change enabled/disabled sets mask */
1142 v_set = (V_set_disable | rt->set) & ~rt->new_set;
1143 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1144 IPFW_WLOCK(chain);
1145 V_set_disable = v_set;
1146 IPFW_WUNLOCK(chain);
1147}
1148
1149static void
1150swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1151{
1152 struct ip_fw *rule;
1153 int i;
1154
1155 IPFW_UH_WLOCK_ASSERT(chain);
1156
1157 /* Swap or move two sets */
1158 for (i = 0; i < chain->n_rules - 1; i++) {
1159 rule = chain->map[i];
1160 if (rule->set == rt->set)
1161 rule->set = rt->new_set;
1162 else if (rule->set == rt->new_set && mv == 0)
1163 rule->set = rt->set;
1164 }
1165 if (V_fw_tables_sets != 0)
1166 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv);
1167}
1168
1169/*
1170 * Swaps or moves set
1171 * Data layout (v0)(current):
1172 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1173 *
1174 * Returns 0 on success.
1175 */
1176static int
1177manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1178 struct sockopt_data *sd)
1179{
1180 ipfw_range_header *rh;
1181
1182 if (sd->valsize != sizeof(*rh))
1183 return (EINVAL);
1184
1185 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1186
1187 if (rh->range.head.length != sizeof(ipfw_range_tlv))
1188 return (1);
1189
1190 IPFW_UH_WLOCK(chain);
1191 switch (op3->opcode) {
1192 case IP_FW_SET_SWAP:
1193 case IP_FW_SET_MOVE:
1194 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE);
1195 break;
1196 case IP_FW_SET_ENABLE:
1197 enable_sets(chain, &rh->range);
1198 break;
1199 }
1200 IPFW_UH_WUNLOCK(chain);
1201
1202 return (0);
1203}
1204
1205/**
1206 * Remove all rules with given number, or do set manipulation.
1207 * Assumes chain != NULL && *chain != NULL.
1208 *
1209 * The argument is a uint32_t. The low 16 bits are the rule or set number;
1210 * the next 8 bits are the new set; the top 8 bits indicate the command:
1211 *
1212 * 0 delete rules numbered "rulenum"
1213 * 1 delete rules in set "rulenum"
1214 * 2 move rules "rulenum" to set "new_set"
1215 * 3 move rules from set "rulenum" to set "new_set"
1216 * 4 swap sets "rulenum" and "new_set"
1217 * 5 delete rules "rulenum" and set "new_set"
1218 */
1219static int
1220del_entry(struct ip_fw_chain *chain, uint32_t arg)
1221{
1222 uint32_t num; /* rule number or old_set */
1223 uint8_t cmd, new_set;
1224 int do_del, ndel;
1225 int error = 0;
1226 ipfw_range_tlv rt;
1227
1228 num = arg & 0xffff;
1229 cmd = (arg >> 24) & 0xff;
1230 new_set = (arg >> 16) & 0xff;
1231
1232 if (cmd > 5 || new_set > RESVD_SET)
1233 return EINVAL;
1234 if (cmd == 0 || cmd == 2 || cmd == 5) {
1235 if (num >= IPFW_DEFAULT_RULE)
1236 return EINVAL;
1237 } else {
1238 if (num > RESVD_SET) /* old_set */
1239 return EINVAL;
1240 }
1241
1242 /* Convert old requests into new representation */
1243 memset(&rt, 0, sizeof(rt));
1244 rt.start_rule = num;
1245 rt.end_rule = num;
1246 rt.set = num;
1247 rt.new_set = new_set;
1248 do_del = 0;
1249
1250 switch (cmd) {
1251 case 0: /* delete rules numbered "rulenum" */
1252 if (num == 0)
1253 rt.flags |= IPFW_RCFLAG_ALL;
1254 else
1255 rt.flags |= IPFW_RCFLAG_RANGE;
1256 do_del = 1;
1257 break;
1258 case 1: /* delete rules in set "rulenum" */
1259 rt.flags |= IPFW_RCFLAG_SET;
1260 do_del = 1;
1261 break;
1262 case 5: /* delete rules "rulenum" and set "new_set" */
1263 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1264 rt.set = new_set;
1265 rt.new_set = 0;
1266 do_del = 1;
1267 break;
1268 case 2: /* move rules "rulenum" to set "new_set" */
1269 rt.flags |= IPFW_RCFLAG_RANGE;
1270 break;
1271 case 3: /* move rules from set "rulenum" to set "new_set" */
1272 IPFW_UH_WLOCK(chain);
1273 swap_sets(chain, &rt, 1);
1274 IPFW_UH_WUNLOCK(chain);
1275 return (0);
1276 case 4: /* swap sets "rulenum" and "new_set" */
1277 IPFW_UH_WLOCK(chain);
1278 swap_sets(chain, &rt, 0);
1279 IPFW_UH_WUNLOCK(chain);
1280 return (0);
1281 default:
1282 return (ENOTSUP);
1283 }
1284
1285 if (do_del != 0) {
1286 if ((error = delete_range(chain, &rt, &ndel)) != 0)
1287 return (error);
1288
1289 if (ndel == 0 && (cmd != 1 && num != 0))
1290 return (EINVAL);
1291
1292 return (0);
1293 }
1294
1295 return (move_range(chain, &rt));
1296}
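/*
 * A small sketch (not part of the original file) of how a legacy caller
 * would pack the argument decoded by del_entry() above; the rule and set
 * numbers are arbitrary examples.
 */
#if 0
/* "Move rules numbered 100 to set 5" corresponds to command 2. */
static uint32_t
pack_del_entry_arg(void)
{
	uint32_t arg;

	arg = 100;			/* low 16 bits: rule (or old set) number */
	arg |= (uint32_t)5 << 16;	/* next 8 bits: new set */
	arg |= (uint32_t)2 << 24;	/* top 8 bits: command */
	return (arg);
}
#endif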
1297
1298/**
1299 * Reset some or all counters on firewall rules.
1300 * The argument `arg' is a u_int32_t. The low 16 bits are the rule number,
1301 * the next 8 bits are the set number, the top 8 bits are the command:
1302 *	0	work with rules from all sets;
1303 * 1 work with rules only from specified set.
1304 * Specified rule number is zero if we want to clear all entries.
1305 * log_only is 1 if we only want to reset logs, zero otherwise.
1306 */
1307static int
1308zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1309{
1310 struct ip_fw *rule;
1311 char *msg;
1312 int i;
1313
1314 uint16_t rulenum = arg & 0xffff;
1315 uint8_t set = (arg >> 16) & 0xff;
1316 uint8_t cmd = (arg >> 24) & 0xff;
1317
1318 if (cmd > 1)
1319 return (EINVAL);
1320 if (cmd == 1 && set > RESVD_SET)
1321 return (EINVAL);
1322
1323 IPFW_UH_RLOCK(chain);
1324 if (rulenum == 0) {
1325 V_norule_counter = 0;
1326 for (i = 0; i < chain->n_rules; i++) {
1327 rule = chain->map[i];
1328 /* Skip rules not in our set. */
1329 if (cmd == 1 && rule->set != set)
1330 continue;
1331 clear_counters(rule, log_only);
1332 }
1333 msg = log_only ? "All logging counts reset" :
1334 "Accounting cleared";
1335 } else {
1336 int cleared = 0;
1337 for (i = 0; i < chain->n_rules; i++) {
1338 rule = chain->map[i];
1339 if (rule->rulenum == rulenum) {
1340 if (cmd == 0 || rule->set == set)
1341 clear_counters(rule, log_only);
1342 cleared = 1;
1343 }
1344 if (rule->rulenum > rulenum)
1345 break;
1346 }
1347 if (!cleared) { /* we did not find any matching rules */
1348 IPFW_UH_RUNLOCK(chain);
1349 return (EINVAL);
1350 }
1351 msg = log_only ? "logging count reset" : "cleared";
1352 }
1353 IPFW_UH_RUNLOCK(chain);
1354
1355 if (V_fw_verbose) {
1356 int lev = LOG_SECURITY | LOG_NOTICE;
1357
1358 if (rulenum)
1359 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1360 else
1361 log(lev, "ipfw: %s.\n", msg);
1362 }
1363 return (0);
1364}
1365
1366
1367/*
1368 * Check rule head in FreeBSD11 format
1369 *
1370 */
1371static int
1372check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1373 struct rule_check_info *ci)
1374{
1375 int l;
1376
1377 if (size < sizeof(*rule)) {
1378 printf("ipfw: rule too short\n");
1379 return (EINVAL);
1380 }
1381
1382 /* Check for valid cmd_len */
1383 l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1384 if (l != size) {
1385 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1386 return (EINVAL);
1387 }
1388 if (rule->act_ofs >= rule->cmd_len) {
1389 printf("ipfw: bogus action offset (%u > %u)\n",
1390 rule->act_ofs, rule->cmd_len - 1);
1391 return (EINVAL);
1392 }
1393
1394 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1395 return (EINVAL);
1396
1397 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1398}
1399
1400/*
1401 * Check rule head in FreeBSD8 format
1402 *
1403 */
1404static int
1405check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1406 struct rule_check_info *ci)
1407{
1408 int l;
1409
1410 if (size < sizeof(*rule)) {
1411 printf("ipfw: rule too short\n");
1412 return (EINVAL);
1413 }
1414
1415 /* Check for valid cmd_len */
1416 l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1417 if (l != size) {
1418 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1419 return (EINVAL);
1420 }
1421 if (rule->act_ofs >= rule->cmd_len) {
1422 printf("ipfw: bogus action offset (%u > %u)\n",
1423 rule->act_ofs, rule->cmd_len - 1);
1424 return (EINVAL);
1425 }
1426
1427 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1428 return (EINVAL);
1429
1430 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1431}
1432
1433static int
1434check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1435{
1436 int cmdlen, l;
1437 int have_action;
1438
1439 have_action = 0;
1440
1441 /*
1442 * Now go for the individual checks. Very simple ones, basically only
1443 * instruction sizes.
1444 */
1445 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1446 cmdlen = F_LEN(cmd);
1447 if (cmdlen > l) {
1448 printf("ipfw: opcode %d size truncated\n",
1449 cmd->opcode);
1450 return EINVAL;
1451 }
1452 switch (cmd->opcode) {
1453 case O_PROBE_STATE:
1454 case O_KEEP_STATE:
1455 case O_PROTO:
1456 case O_IP_SRC_ME:
1457 case O_IP_DST_ME:
1458 case O_LAYER2:
1459 case O_IN:
1460 case O_FRAG:
1461 case O_DIVERTED:
1462 case O_IPOPT:
1463 case O_IPTOS:
1464 case O_IPPRECEDENCE:
1465 case O_IPVER:
1466 case O_SOCKARG:
1467 case O_TCPFLAGS:
1468 case O_TCPOPTS:
1469 case O_ESTAB:
1470 case O_VERREVPATH:
1471 case O_VERSRCREACH:
1472 case O_ANTISPOOF:
1473 case O_IPSEC:
1474#ifdef INET6
1475 case O_IP6_SRC_ME:
1476 case O_IP6_DST_ME:
1477 case O_EXT_HDR:
1478 case O_IP6:
1479#endif
1480 case O_IP4:
1481 case O_TAG:
1482 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1483 goto bad_size;
1484 break;
1485
1486 case O_FIB:
1487 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1488 goto bad_size;
1489 if (cmd->arg1 >= rt_numfibs) {
1490 printf("ipfw: invalid fib number %d\n",
1491 cmd->arg1);
1492 return EINVAL;
1493 }
1494 break;
1495
1496 case O_SETFIB:
1497 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1498 goto bad_size;
1499 if ((cmd->arg1 != IP_FW_TARG) &&
1500 ((cmd->arg1 & 0x7FFFF) >= rt_numfibs)) {
1501 printf("ipfw: invalid fib number %d\n",
1502 cmd->arg1 & 0x7FFFF);
1503 return EINVAL;
1504 }
1505 goto check_action;
1506
1507 case O_UID:
1508 case O_GID:
1509 case O_JAIL:
1510 case O_IP_SRC:
1511 case O_IP_DST:
1512 case O_TCPSEQ:
1513 case O_TCPACK:
1514 case O_PROB:
1515 case O_ICMPTYPE:
1516 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1517 goto bad_size;
1518 break;
1519
1520 case O_LIMIT:
1521 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1522 goto bad_size;
1523 break;
1524
1525 case O_LOG:
1526 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1527 goto bad_size;
1528
1529 ((ipfw_insn_log *)cmd)->log_left =
1530 ((ipfw_insn_log *)cmd)->max_log;
1531
1532 break;
1533
1534 case O_IP_SRC_MASK:
1535 case O_IP_DST_MASK:
1536 /* only odd command lengths */
1537 if ((cmdlen & 1) == 0)
1538 goto bad_size;
1539 break;
1540
1541 case O_IP_SRC_SET:
1542 case O_IP_DST_SET:
1543 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1544 printf("ipfw: invalid set size %d\n",
1545 cmd->arg1);
1546 return EINVAL;
1547 }
1548 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1549 (cmd->arg1+31)/32 )
1550 goto bad_size;
1551 break;
1552
1553 case O_IP_SRC_LOOKUP:
1554 case O_IP_DST_LOOKUP:
1555 if (cmd->arg1 >= V_fw_tables_max) {
1556 printf("ipfw: invalid table number %d\n",
1557 cmd->arg1);
1558 return (EINVAL);
1559 }
1560 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1561 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1562 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1563 goto bad_size;
1564 ci->object_opcodes++;
1565 break;
1566 case O_IP_FLOW_LOOKUP:
1567 if (cmd->arg1 >= V_fw_tables_max) {
1568 printf("ipfw: invalid table number %d\n",
1569 cmd->arg1);
1570 return (EINVAL);
1571 }
1572 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1573 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1574 goto bad_size;
1575 ci->object_opcodes++;
1576 break;
1577 case O_MACADDR2:
1578 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1579 goto bad_size;
1580 break;
1581
1582 case O_NOP:
1583 case O_IPID:
1584 case O_IPTTL:
1585 case O_IPLEN:
1586 case O_TCPDATALEN:
1587 case O_TCPWIN:
1588 case O_TAGGED:
1589 if (cmdlen < 1 || cmdlen > 31)
1590 goto bad_size;
1591 break;
1592
1593 case O_DSCP:
1594 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1595 goto bad_size;
1596 break;
1597
1598 case O_MAC_TYPE:
1599 case O_IP_SRCPORT:
1600 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1601 if (cmdlen < 2 || cmdlen > 31)
1602 goto bad_size;
1603 break;
1604
1605 case O_RECV:
1606 case O_XMIT:
1607 case O_VIA:
1608 if (((ipfw_insn_if *)cmd)->name[0] == '\1')
1609 ci->object_opcodes++;
1610 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1611 goto bad_size;
1612 break;
1613
1614 case O_ALTQ:
1615 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1616 goto bad_size;
1617 break;
1618
1619 case O_PIPE:
1620 case O_QUEUE:
1621 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1622 goto bad_size;
1623 goto check_action;
1624
1625 case O_FORWARD_IP:
1626 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1627 goto bad_size;
1628 goto check_action;
1629#ifdef INET6
1630 case O_FORWARD_IP6:
1631 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1632 goto bad_size;
1633 goto check_action;
1634#endif /* INET6 */
1635
1636 case O_DIVERT:
1637 case O_TEE:
1638 if (ip_divert_ptr == NULL)
1639 return EINVAL;
1640 else
1641 goto check_size;
1642 case O_NETGRAPH:
1643 case O_NGTEE:
1644 if (ng_ipfw_input_p == NULL)
1645 return EINVAL;
1646 else
1647 goto check_size;
1648 case O_NAT:
1649 if (!IPFW_NAT_LOADED)
1650 return EINVAL;
1651 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1652 goto bad_size;
1653 goto check_action;
1654 case O_FORWARD_MAC: /* XXX not implemented yet */
1655 case O_CHECK_STATE:
1656 case O_COUNT:
1657 case O_ACCEPT:
1658 case O_DENY:
1659 case O_REJECT:
1660 case O_SETDSCP:
1661#ifdef INET6
1662 case O_UNREACH6:
1663#endif
1664 case O_SKIPTO:
1665 case O_REASS:
1666 case O_CALLRETURN:
1667check_size:
1668 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1669 goto bad_size;
1670check_action:
1671 if (have_action) {
1672 printf("ipfw: opcode %d, multiple actions"
1673 " not allowed\n",
1674 cmd->opcode);
1675 return (EINVAL);
1676 }
1677 have_action = 1;
1678 if (l != cmdlen) {
1679 printf("ipfw: opcode %d, action must be"
1680 " last opcode\n",
1681 cmd->opcode);
1682 return (EINVAL);
1683 }
1684 break;
1685#ifdef INET6
1686 case O_IP6_SRC:
1687 case O_IP6_DST:
1688 if (cmdlen != F_INSN_SIZE(struct in6_addr) +
1689 F_INSN_SIZE(ipfw_insn))
1690 goto bad_size;
1691 break;
1692
1693 case O_FLOW6ID:
1694 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1695 ((ipfw_insn_u32 *)cmd)->o.arg1)
1696 goto bad_size;
1697 break;
1698
1699 case O_IP6_SRC_MASK:
1700 case O_IP6_DST_MASK:
1701 if ( !(cmdlen & 1) || cmdlen > 127)
1702 goto bad_size;
1703 break;
1704 case O_ICMP6TYPE:
1705 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
1706 goto bad_size;
1707 break;
1708#endif
1709
1710 default:
1711 switch (cmd->opcode) {
1712#ifndef INET6
1713 case O_IP6_SRC_ME:
1714 case O_IP6_DST_ME:
1715 case O_EXT_HDR:
1716 case O_IP6:
1717 case O_UNREACH6:
1718 case O_IP6_SRC:
1719 case O_IP6_DST:
1720 case O_FLOW6ID:
1721 case O_IP6_SRC_MASK:
1722 case O_IP6_DST_MASK:
1723 case O_ICMP6TYPE:
1724 printf("ipfw: no IPv6 support in kernel\n");
1725 return (EPROTONOSUPPORT);
1726#endif
1727 default:
1728 printf("ipfw: opcode %d, unknown opcode\n",
1729 cmd->opcode);
1730 return (EINVAL);
1731 }
1732 }
1733 }
1734 if (have_action == 0) {
1735 printf("ipfw: missing action\n");
1736 return (EINVAL);
1737 }
1738 return 0;
1739
1740bad_size:
1741 printf("ipfw: opcode %d size %d wrong\n",
1742 cmd->opcode, cmdlen);
1743 return (EINVAL);
1744}
1745
1746
1747/*
1748 * Translation of requests for compatibility with FreeBSD 7.2/8.
1749 * a static variable tells us if we have an old client from userland,
1750 * and if necessary we translate requests and responses between the
1751 * two formats.
1752 */
1753static int is7 = 0;
1754
1755struct ip_fw7 {
1756 struct ip_fw7 *next; /* linked list of rules */
1757 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */
1758 /* 'next_rule' is used to pass up 'set_disable' status */
1759
1760 uint16_t act_ofs; /* offset of action in 32-bit units */
1761 uint16_t cmd_len; /* # of 32-bit words in cmd */
1762 uint16_t rulenum; /* rule number */
1763 uint8_t set; /* rule set (0..31) */
1764 // #define RESVD_SET 31 /* set for default and persistent rules */
1765 uint8_t _pad; /* padding */
1766 // uint32_t id; /* rule id, only in v.8 */
1767 /* These fields are present in all rules. */
1768 uint64_t pcnt; /* Packet counter */
1769 uint64_t bcnt; /* Byte counter */
1770 uint32_t timestamp; /* tv_sec of last match */
1771
1772 ipfw_insn cmd[1]; /* storage for commands */
1773};
1774
1775static int convert_rule_to_7(struct ip_fw_rule0 *rule);
1776static int convert_rule_to_8(struct ip_fw_rule0 *rule);
1777
1778#ifndef RULESIZE7
1779#define RULESIZE7(rule) (sizeof(struct ip_fw7) + \
1780 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
1781#endif
1782
1783
1784/*
1785 * Copy the static and dynamic rules to the supplied buffer
1786 * and return the amount of space actually used.
1787 * Must be run under IPFW_UH_RLOCK
1788 */
1789static size_t
1790ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
1791{
1792 char *bp = buf;
1793 char *ep = bp + space;
1794 struct ip_fw *rule;
1795 struct ip_fw_rule0 *dst;
1796 int error, i, l, warnflag;
1797 time_t boot_seconds;
1798
1799 warnflag = 0;
1800
1801 boot_seconds = boottime.tv_sec;
1802 for (i = 0; i < chain->n_rules; i++) {
1803 rule = chain->map[i];
1804
1805 if (is7) {
1806			/* Convert rule to FreeBSD 7.2 format */
1807 l = RULESIZE7(rule);
1808 if (bp + l + sizeof(uint32_t) <= ep) {
1809 bcopy(rule, bp, l + sizeof(uint32_t));
1810 error = set_legacy_obj_kidx(chain,
1811 (struct ip_fw_rule0 *)bp);
1812 if (error != 0)
1813 return (0);
1814 error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
1815 if (error)
1816 return 0; /*XXX correct? */
1817 /*
1818 * XXX HACK. Store the disable mask in the "next"
1819 * pointer in a wild attempt to keep the ABI the same.
1820 * Why do we do this on EVERY rule?
1821 */
1822 bcopy(&V_set_disable,
1823 &(((struct ip_fw7 *)bp)->next_rule),
1824 sizeof(V_set_disable));
1825 if (((struct ip_fw7 *)bp)->timestamp)
1826 ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
1827 bp += l;
1828 }
1829 continue; /* go to next rule */
1830 }
1831
1832 l = RULEUSIZE0(rule);
1833 if (bp + l > ep) { /* should not happen */
1834 printf("overflow dumping static rules\n");
1835 break;
1836 }
1837 dst = (struct ip_fw_rule0 *)bp;
1838 export_rule0(rule, dst, l);
1839 error = set_legacy_obj_kidx(chain, dst);
1840
1841 /*
1842 * XXX HACK. Store the disable mask in the "next"
1843 * pointer in a wild attempt to keep the ABI the same.
1844 * Why do we do this on EVERY rule?
1845 *
1846 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
1847 * so we need to fail _after_ saving at least one mask.
1848 */
1849 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
1850 if (dst->timestamp)
1851 dst->timestamp += boot_seconds;
1852 bp += l;
1853
1854 if (error != 0) {
1855 if (error == 2) {
1856 /* Non-fatal table rewrite error. */
1857 warnflag = 1;
1858 continue;
1859 }
1860 printf("Stop on rule %d. Fail to convert table\n",
1861 rule->rulenum);
1862 break;
1863 }
1864 }
1865 if (warnflag != 0)
1866 printf("ipfw: process %s is using legacy interfaces,"
1867 " consider rebuilding\n", "");
1868 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
1869 return (bp - (char *)buf);
1870}
1871
1872
1873struct dump_args {
1874 uint32_t b; /* start rule */
1875 uint32_t e; /* end rule */
1876 uint32_t rcount; /* number of rules */
1877 uint32_t rsize; /* rules size */
1878 uint32_t tcount; /* number of tables */
1879 int rcounters; /* counters */
1880};
1881
1882void
1883ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
1884{
1885
1886 ntlv->head.type = no->etlv;
1887 ntlv->head.length = sizeof(*ntlv);
1888 ntlv->idx = no->kidx;
1889 strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
1890}
1891
1892/*
1893 * Export named object info in instance @ni, identified by @kidx
1894 * to ipfw_obj_ntlv. TLV is allocated from @sd space.
1895 *
1896 * Returns 0 on success.
1897 */
1898static int
1899export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
1900 struct sockopt_data *sd)
1901{
1902 struct named_object *no;
1903 ipfw_obj_ntlv *ntlv;
1904
1905 no = ipfw_objhash_lookup_kidx(ni, kidx);
1906 KASSERT(no != NULL, ("invalid object kernel index passed"));
1907
1908 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
1909 if (ntlv == NULL)
1910 return (ENOMEM);
1911
1912 ipfw_export_obj_ntlv(no, ntlv);
1913 return (0);
1914}
1915
1916/*
1917 * Dumps static rules with table TLVs in buffer @sd.
1918 *
1919 * Returns 0 on success.
1920 */
1921static int
1922dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
1923 uint32_t *bmask, struct sockopt_data *sd)
1924{
1925 int error;
1926 int i, l;
1927 uint32_t tcount;
1928 ipfw_obj_ctlv *ctlv;
1929 struct ip_fw *krule;
1930 struct namedobj_instance *ni;
1931 caddr_t dst;
1932
1933 /* Dump table names first (if any) */
1934 if (da->tcount > 0) {
1935 /* Header first */
1936 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1937 if (ctlv == NULL)
1938 return (ENOMEM);
1939 ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
1940 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
1941 sizeof(*ctlv);
1942 ctlv->count = da->tcount;
1943 ctlv->objsize = sizeof(ipfw_obj_ntlv);
1944 }
1945
1946 i = 0;
1947 tcount = da->tcount;
1948 ni = ipfw_get_table_objhash(chain);
1949 while (tcount > 0) {
1950 if ((bmask[i / 32] & (1 << (i % 32))) == 0) {
1951 i++;
1952 continue;
1953 }
1954
1955 /* Jump to shared named object bitmask */
1956 if (i >= IPFW_TABLES_MAX) {
1957 ni = CHAIN_TO_SRV(chain);
1958 i -= IPFW_TABLES_MAX;
1959 bmask += IPFW_TABLES_MAX / 32;
1960 }
1961
1962 if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
1963 return (error);
1964
1965 i++;
1966 tcount--;
1967 }
1968
1969 /* Dump rules */
1970 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1971 if (ctlv == NULL)
1972 return (ENOMEM);
1973 ctlv->head.type = IPFW_TLV_RULE_LIST;
1974 ctlv->head.length = da->rsize + sizeof(*ctlv);
1975 ctlv->count = da->rcount;
1976
1977 for (i = da->b; i < da->e; i++) {
1978 krule = chain->map[i];
1979
1980 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
1981 if (da->rcounters != 0)
1982 l += sizeof(struct ip_fw_bcounter);
1983 dst = (caddr_t)ipfw_get_sopt_space(sd, l);
1984 if (dst == NULL)
1985 return (ENOMEM);
1986
1987 export_rule1(krule, dst, l, da->rcounters);
1988 }
1989
1990 return (0);
1991}
1992
1993/*
1994 * Marks every object index used in @rule with bit in @bmask.
1995 * Used to generate bitmask of referenced tables/objects for given ruleset
1996 * or its part.
1997 *
1998 * Returns number of newly-referenced objects.
1999 */
2000static int
2001mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
2002 uint32_t *bmask)
2003{
2004 int cmdlen, l, count;
2005 ipfw_insn *cmd;
2006 uint16_t kidx;
2007 struct opcode_obj_rewrite *rw;
2008 int bidx;
2009 uint8_t subtype;
2010
2011 l = rule->cmd_len;
2012 cmd = rule->cmd;
2013 cmdlen = 0;
2014 count = 0;
2015 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2016 cmdlen = F_LEN(cmd);
2017
2018 rw = ipfw_find_op_rw(cmd->opcode);
2019 if (rw == NULL)
2020 continue;
2021
2022 if (rw->classifier(cmd, &kidx, &subtype) != 0)
2023 continue;
2024
2025 bidx = kidx / 32;
2026 /* Maintain separate bitmasks for table and non-table objects */
2027 if (rw->etlv != IPFW_TLV_TBL_NAME)
2028 bidx += IPFW_TABLES_MAX / 32;
2029
2030 if ((bmask[bidx] & (1 << (kidx % 32))) == 0)
2031 count++;
2032
2033 bmask[bidx] |= 1 << (kidx % 32);
2034 }
2035
2036 return (count);
2037}
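/*
 * Worked example (not from the original sources) of the split bitmask
 * indexing above, assuming IPFW_TABLES_MAX is a multiple of 32 (the mask
 * allocated in dump_config() holds 2 * IPFW_TABLES_MAX bits):
 *
 *   table object (IPFW_TLV_TBL_NAME), kidx 37:
 *       bmask[37 / 32] == bmask[1], bit 37 % 32 == 5
 *   any other object type, kidx 37:
 *       bmask[IPFW_TABLES_MAX / 32 + 1], bit 5
 *
 * dump_static_rules() walks the first half against the table objhash and
 * the second half against CHAIN_TO_SRV(chain).
 */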
2038
2039/*
2040 * Dumps requested objects data
2041 * Data layout (version 0)(current):
2042 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2043 * size = ipfw_cfg_lheader.size
2044 * Reply: [ ipfw_cfg_lheader
2045 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2046 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2047 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2048 * ] (optional)
2049 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2050 * ]
2051 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
2052 * The rest (size, count) are set to zero and need to be ignored.
2053 *
2054 * Returns 0 on success.
2055 */
2056static int
2057dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2058 struct sockopt_data *sd)
2059{
2060 ipfw_cfg_lheader *hdr;
2061 struct ip_fw *rule;
2062 size_t sz, rnum;
2063 uint32_t hdr_flags;
2064 int error, i;
2065 struct dump_args da;
2066 uint32_t *bmask;
2067
2068 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2069 if (hdr == NULL)
2070 return (EINVAL);
2071
2072 error = 0;
2073 bmask = NULL;
2074 /* Allocate needed state. Note we allocate 2xspace mask, for table&srv */
2075 if (hdr->flags & IPFW_CFG_GET_STATIC)
2076 bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO);
2077
2078 IPFW_UH_RLOCK(chain);
2079
2080 /*
2081 * STAGE 1: Determine size/count for objects in range.
2082 * Prepare used tables bitmask.
2083 */
2084 sz = sizeof(ipfw_cfg_lheader);
2085 memset(&da, 0, sizeof(da));
2086
2087 da.b = 0;
2088 da.e = chain->n_rules;
2089
2090 if (hdr->end_rule != 0) {
2091 /* Handle custom range */
2092 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2093 rnum = IPFW_DEFAULT_RULE;
2094 da.b = ipfw_find_rule(chain, rnum, 0);
2095 rnum = hdr->end_rule;
2096 rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
2097 da.e = ipfw_find_rule(chain, rnum, 0) + 1;
2098 }
2099
2100 if (hdr->flags & IPFW_CFG_GET_STATIC) {
2101 for (i = da.b; i < da.e; i++) {
2102 rule = chain->map[i];
2103 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2104 da.rcount++;
2105 /* Update bitmask of used objects for given range */
2106 da.tcount += mark_object_kidx(chain, rule, bmask);
2107 }
2108 /* Add counters if requested */
2109 if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2110 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2111 da.rcounters = 1;
2112 }
2113
2114 if (da.tcount > 0)
2115 sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2116 sizeof(ipfw_obj_ctlv);
2117 sz += da.rsize + sizeof(ipfw_obj_ctlv);
2118 }
2119
2120 if (hdr->flags & IPFW_CFG_GET_STATES)
2121 sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
2122 sizeof(ipfw_obj_ctlv);
2123
2124
2125 /*
2126 * Fill header anyway.
2127	 * Note we have to save header fields to stable storage, since
2128	 * the buffer inside @sd can be flushed after dumping rules.
2129 */
2130 hdr->size = sz;
2131 hdr->set_mask = ~V_set_disable;
2132 hdr_flags = hdr->flags;
2133 hdr = NULL;
2134
2135 if (sd->valsize < sz) {
2136 error = ENOMEM;
2137 goto cleanup;
2138 }
2139
2140 /* STAGE2: Store actual data */
2141 if (hdr_flags & IPFW_CFG_GET_STATIC) {
2142 error = dump_static_rules(chain, &da, bmask, sd);
2143 if (error != 0)
2144 goto cleanup;
2145 }
2146
2147 if (hdr_flags & IPFW_CFG_GET_STATES)
2148 error = ipfw_dump_states(chain, sd);
2149
2150cleanup:
2151 IPFW_UH_RUNLOCK(chain);
2152
2153 if (bmask != NULL)
2154 free(bmask, M_TEMP);
2155
2156 return (error);
2157}
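/*
 * Illustrative sketch, not part of this file: the userland side of the dump
 * documented above is a two-pass getsockopt(IP_FW3) on a raw socket -- first
 * to learn hdr.size, then again with a buffer that large.  The IP_FW_XGET
 * opcode name and the exact ipfw_cfg_lheader field layout are assumptions
 * based on ip_fw.h; error handling is omitted.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <stdlib.h>
#include <string.h>

static void *
example_dump_config(int sock, size_t *len)
{
	ipfw_cfg_lheader hdr, *cfg;
	socklen_t sz;

	memset(&hdr, 0, sizeof(hdr));
	hdr.opheader.opcode = IP_FW_XGET;	/* assumed opcode name */
	hdr.flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS;
	sz = sizeof(hdr);
	/* Pass 1: kernel fills hdr.size with the space it needs. */
	getsockopt(sock, IPPROTO_IP, IP_FW3, &hdr, &sz);

	cfg = malloc(hdr.size);
	memcpy(cfg, &hdr, sizeof(hdr));
	sz = hdr.size;
	/* Pass 2: same request, buffer large enough for the full reply. */
	getsockopt(sock, IPPROTO_IP, IP_FW3, cfg, &sz);
	*len = sz;
	return (cfg);
}
#endif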
2158
2159static int
2160check_object_name(ipfw_obj_ntlv *ntlv)
2159int
2160ipfw_check_object_name_generic(const char *name)
2161{
2161{
2162 int error;
2162 int nsize;
2163
2163
2164 switch (ntlv->head.type) {
2165 case IPFW_TLV_TBL_NAME:
2166 error = ipfw_check_table_name(ntlv->name);
2167 break;
2168 default:
2169 error = ENOTSUP;
2170 }
2171
2164 nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
2165 if (strnlen(name, nsize) == nsize)
2166 return (EINVAL);
2167 if (name[0] == '\0')
2168 return (EINVAL);
2172 return (0);
2173}
2169 return (0);
2170}
2171
2172/*
2173 * Creates non-existent objects referenced by rule.
2174 *
2175 * Return 0 on success.
2176 */
2177int
2178create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
2179 struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
2180{
2181 struct opcode_obj_rewrite *rw;
2182 struct obj_idx *p;
2183 uint16_t kidx;
2184 int error;
2185
2186 /*
2187 * Compatibility stuff: do actual creation for non-existing,
2188 * but referenced objects.
2189 */
2190 for (p = oib; p < pidx; p++) {
2191 if (p->kidx != 0)
2192 continue;
2193
2194 ti->uidx = p->uidx;
2195 ti->type = p->type;
2196 ti->atype = 0;
2197
2198 rw = ipfw_find_op_rw((cmd + p->off)->opcode);
2199 KASSERT(rw != NULL, ("Unable to find handler for op %d",
2200 (cmd + p->off)->opcode));
2201
2202 error = rw->create_object(ch, ti, &kidx);
2203 if (error == 0) {
2204 p->kidx = kidx;
2205 continue;
2206 }
2207
2208 /*
2209 * Error happened. We have to rollback everything.
2210 * Drop all already acquired references.
2211 */
2212 IPFW_UH_WLOCK(ch);
2213 unref_oib_objects(ch, cmd, oib, pidx);
2214 IPFW_UH_WUNLOCK(ch);
2215
2216 return (error);
2217 }
2218
2219 return (0);
2220}
2221
2222/*
2223 * Compatibility function for old ipfw(8) binaries.
2224 * Rewrites table/nat kernel indices with userland ones.
2225 * Converts tables matching '/^\d+$/' to their atoi() value.
2226 * Uses number 65535 for other tables.
2227 *
2228 * Returns 0 on success.
2229 */
2230static int
2231set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
2232{
2233 int cmdlen, error, l;
2234 ipfw_insn *cmd;
2235 uint16_t kidx, uidx;
2236 struct named_object *no;
2237 struct opcode_obj_rewrite *rw;
2238 uint8_t subtype;
2239 char *end;
2240 long val;
2241
2242 error = 0;
2243
2244 l = rule->cmd_len;
2245 cmd = rule->cmd;
2246 cmdlen = 0;
2247 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2248 cmdlen = F_LEN(cmd);
2249
2250 rw = ipfw_find_op_rw(cmd->opcode);
2251 if (rw == NULL)
2252 continue;
2253
2254		/* Check if there is an object index in the given opcode */
2255 if (rw->classifier(cmd, &kidx, &subtype) != 0)
2256 continue;
2257
2258 /* Try to find referenced kernel object */
2259 no = rw->find_bykidx(ch, kidx);
2260 if (no == NULL)
2261 continue;
2262
2263 val = strtol(no->name, &end, 10);
2264 if (*end == '\0' && val < 65535) {
2265 uidx = val;
2266 } else {
2267
2268 /*
2269 * We are called via legacy opcode.
2270			 * Save the error and show the table as a fake
2271			 * number so as not to make ipfw(8) hang.
2272 */
2273 uidx = 65535;
2274 error = 2;
2275 }
2276
2277 rw->update(cmd, uidx);
2278 }
2279
2280 return (error);
2281}
2282
2283
2284/*
2285 * Unreferences all already-referenced objects in given @cmd rule,
2286 * using information in @oib.
2287 *
2288 * Used to roll back a partially converted rule on error.
2289 */
2290void
2291unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
2292 struct obj_idx *end)
2293{
2294 struct opcode_obj_rewrite *rw;
2295 struct named_object *no;
2296 struct obj_idx *p;
2297
2298 IPFW_UH_WLOCK_ASSERT(ch);
2299
2300 for (p = oib; p < end; p++) {
2301 if (p->kidx == 0)
2302 continue;
2303
2304 rw = ipfw_find_op_rw((cmd + p->off)->opcode);
2305 KASSERT(rw != NULL, ("Unable to find handler for op %d",
2306 (cmd + p->off)->opcode));
2307
2308 /* Find & unref by existing idx */
2309 no = rw->find_bykidx(ch, p->kidx);
2310 KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
2311 no->refcnt--;
2312 }
2313}
2314
2315/*
2316 * Remove references from every object used in @rule.
2317 * Used by the rule removal code.
2318 */
2319static void
2320unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
2321{
2322 int cmdlen, l;
2323 ipfw_insn *cmd;
2324 struct named_object *no;
2325 uint16_t kidx;
2326 struct opcode_obj_rewrite *rw;
2327 uint8_t subtype;
2328
2329 IPFW_UH_WLOCK_ASSERT(ch);
2330
2331 l = rule->cmd_len;
2332 cmd = rule->cmd;
2333 cmdlen = 0;
2334 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2335 cmdlen = F_LEN(cmd);
2336
2337 rw = ipfw_find_op_rw(cmd->opcode);
2338 if (rw == NULL)
2339 continue;
2340 if (rw->classifier(cmd, &kidx, &subtype) != 0)
2341 continue;
2342
2343 no = rw->find_bykidx(ch, kidx);
2344
2345 KASSERT(no != NULL, ("table id %d not found", kidx));
2346 KASSERT(no->subtype == subtype,
2347 ("wrong type %d (%d) for table id %d",
2348 no->subtype, subtype, kidx));
2349 KASSERT(no->refcnt > 0, ("refcount for table %d is %d",
2350 kidx, no->refcnt));
2351
2352 no->refcnt--;
2353 }
2354}
2355
2356
2357/*
2358 * Find and reference object (if any) stored in instruction @cmd.
2359 *
2360 * Saves object info in @pidx, sets
2361 * - @found to 1 if the object was found and referenced,
2362 * - @unresolved to 1 if the object should exist but was not found.
2363 *
2364 * Returns non-zero value in case of error.
2365 */
2366int
2367ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
2368 struct obj_idx *pidx, int *found, int *unresolved)
2369{
2370 struct named_object *no;
2371 struct opcode_obj_rewrite *rw;
2372 int error;
2373
2374 *found = 0;
2375 *unresolved = 0;
2376
2377 /* Check if this opcode is candidate for rewrite */
2378 rw = ipfw_find_op_rw(cmd->opcode);
2379 if (rw == NULL)
2380 return (0);
2381
2382 /* Check if we need to rewrite this opcode */
2383 if (rw->classifier(cmd, &ti->uidx, &ti->type) != 0)
2384 return (0);
2385
2386 /* Need to rewrite. Save necessary fields */
2387 pidx->uidx = ti->uidx;
2388 pidx->type = ti->type;
2389
2390 /* Try to find referenced kernel object */
2391 error = rw->find_byname(ch, ti, &no);
2392 if (error != 0)
2393 return (error);
2394 if (no == NULL) {
2395 *unresolved = 1;
2396 return (0);
2397 }
2398
2399 /* Found. bump refcount */
2400 *found = 1;
2401 no->refcnt++;
2402 pidx->kidx = no->kidx;
2403
2404 return (0);
2405}
2406
2407/*
2408 * Adds one or more rules to ipfw @chain.
2409 * Data layout (version 0)(current):
2410 * Request:
2411 * [
2412 * ip_fw3_opheader
2413 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2414 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2415 * ]
2416 * Reply:
2417 * [
2418 * ip_fw3_opheader
2419 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2420 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2421 * ]
2422 *
2423 * Rules in reply are modified to store their actual ruleset number.
2424 *
2425 * (*1) TLVs inside IPFW_TLV_TBL_LIST need to be sorted ascending
2426 * according to their idx field and there must be no duplicates.
2427 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST need to be sorted ascending.
2428 * (*3) Each ip_fw structure needs to be aligned to a u64 boundary.
2429 *
2430 * Returns 0 on success.
2431 */
2432static int
2433add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2434 struct sockopt_data *sd)
2435{
2436 ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2437 ipfw_obj_ntlv *ntlv;
2438 int clen, error, idx;
2439 uint32_t count, read;
2440 struct ip_fw_rule *r;
2441 struct rule_check_info rci, *ci, *cbuf;
2442 int i, rsize;
2443
2444 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2445 ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2446
2447 read = sizeof(ip_fw3_opheader);
2448 rtlv = NULL;
2449 tstate = NULL;
2450 cbuf = NULL;
2451 memset(&rci, 0, sizeof(struct rule_check_info));
2452
2453 if (read + sizeof(*ctlv) > sd->valsize)
2454 return (EINVAL);
2455
2456 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2457 clen = ctlv->head.length;
2458 /* Check size and alignment */
2459 if (clen > sd->valsize || clen < sizeof(*ctlv))
2460 return (EINVAL);
2461 if ((clen % sizeof(uint64_t)) != 0)
2462 return (EINVAL);
2463
2464 /*
2465 * Some table names or other named objects.
2466		 * Check for validity.
2467 */
2468 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2469 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2470 return (EINVAL);
2471
2472 /*
2473 * Check each TLV.
2474 * Ensure TLVs are sorted ascending and
2475 * there are no duplicates.
2476 */
2477 idx = -1;
2478 ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2479 while (count > 0) {
2480 if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2481 return (EINVAL);
2482
2486 error = check_object_name(ntlv);
2483 error = ipfw_check_object_name_generic(ntlv->name);
2487 if (error != 0)
2488 return (error);
2489
2490 if (ntlv->idx <= idx)
2491 return (EINVAL);
2492
2493 idx = ntlv->idx;
2494 count--;
2495 ntlv++;
2496 }
2497
2498 tstate = ctlv;
2499 read += ctlv->head.length;
2500 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2501 }
2502
2503 if (read + sizeof(*ctlv) > sd->valsize)
2504 return (EINVAL);
2505
2506 if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2507 clen = ctlv->head.length;
2508 if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2509 return (EINVAL);
2510 if ((clen % sizeof(uint64_t)) != 0)
2511 return (EINVAL);
2512
2513 /*
2514 * TODO: Permit adding multiple rules at once
2515 */
2516 if (ctlv->count != 1)
2517 return (ENOTSUP);
2518
2519 clen -= sizeof(*ctlv);
2520
2521 if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2522 return (EINVAL);
2523
2524 /* Allocate state for each rule or use stack */
2525 if (ctlv->count == 1) {
2526 memset(&rci, 0, sizeof(struct rule_check_info));
2527 cbuf = &rci;
2528 } else
2529 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2530 M_WAITOK | M_ZERO);
2531 ci = cbuf;
2532
2533 /*
2534		 * Check each rule for validity.
2535		 * Ensure numbered rules are sorted ascending
2536		 * and properly aligned.
2537 */
2538 idx = 0;
2539 r = (struct ip_fw_rule *)(ctlv + 1);
2540 count = 0;
2541 error = 0;
2542 while (clen > 0) {
2543 rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2544 if (rsize > clen || ctlv->count <= count) {
2545 error = EINVAL;
2546 break;
2547 }
2548
2549 ci->ctlv = tstate;
2550 error = check_ipfw_rule1(r, rsize, ci);
2551 if (error != 0)
2552 break;
2553
2554 /* Check sorting */
2555 if (r->rulenum != 0 && r->rulenum < idx) {
2556 printf("rulenum %d idx %d\n", r->rulenum, idx);
2557 error = EINVAL;
2558 break;
2559 }
2560 idx = r->rulenum;
2561
2562 ci->urule = (caddr_t)r;
2563
2564 rsize = roundup2(rsize, sizeof(uint64_t));
2565 clen -= rsize;
2566 r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2567 count++;
2568 ci++;
2569 }
2570
2571 if (ctlv->count != count || error != 0) {
2572 if (cbuf != &rci)
2573 free(cbuf, M_TEMP);
2574 return (EINVAL);
2575 }
2576
2577 rtlv = ctlv;
2578 read += ctlv->head.length;
2579 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2580 }
2581
2582 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
2583 if (cbuf != NULL && cbuf != &rci)
2584 free(cbuf, M_TEMP);
2585 return (EINVAL);
2586 }
2587
2588 /*
2589	 * Passed rules seem to be valid.
2590	 * Allocate storage and try to add them to the chain.
2591 */
2592 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
2593 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
2594 ci->krule = ipfw_alloc_rule(chain, clen);
2595 import_rule1(ci);
2596 }
2597
2598 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
2599		/* Free allocated krules */
2600 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
2601 free(ci->krule, M_IPFW);
2602 }
2603
2604 if (cbuf != NULL && cbuf != &rci)
2605 free(cbuf, M_TEMP);
2606
2607 return (error);
2608}
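/*
 * Illustrative sketch, not part of this file: composing the request parsed
 * by add_rules() above -- an ip_fw3_opheader followed by an
 * IPFW_TLV_RULE_LIST ipfw_obj_ctlv carrying a single ip_fw_rule padded to a
 * 64-bit boundary.  The IP_FW_XADD opcode name, the userland visibility of
 * RULESIZE()/roundup2(), and the use of a "get"-direction sockopt are
 * assumptions; building the rule's microinstructions is elided.
 */
#if 0	/* example only */
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <stdlib.h>
#include <string.h>

static int
example_add_one_rule(int sock, struct ip_fw_rule *rule)
{
	ip_fw3_opheader *op3;
	ipfw_obj_ctlv *ctlv;
	char *buf;
	socklen_t blen;
	size_t rlen;
	int error;

	rlen = roundup2(RULESIZE(rule), sizeof(uint64_t));
	blen = sizeof(*op3) + sizeof(*ctlv) + rlen;
	buf = calloc(1, blen);

	op3 = (ip_fw3_opheader *)buf;
	op3->opcode = IP_FW_XADD;		/* assumed opcode name */

	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
	ctlv->head.type = IPFW_TLV_RULE_LIST;
	ctlv->head.length = sizeof(*ctlv) + rlen;
	ctlv->count = 1;			/* one rule per call (see TODO above) */
	memcpy(ctlv + 1, rule, RULESIZE(rule));

	/*
	 * Issued as a "get" so the kernel can write the assigned rule
	 * number back into the same buffer, per the Reply layout above.
	 */
	error = getsockopt(sock, IPPROTO_IP, IP_FW3, buf, &blen);
	free(buf);
	return (error);
}
#endif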
2609
2610/*
2611 * Lists all sopts currently registered.
2612 * Data layout (v0)(current):
2613 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2614 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
2615 *
2616 * Returns 0 on success
2617 */
2618static int
2619dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2620 struct sockopt_data *sd)
2621{
2622 struct _ipfw_obj_lheader *olh;
2623 ipfw_sopt_info *i;
2624 struct ipfw_sopt_handler *sh;
2625 uint32_t count, n, size;
2626
2627 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2628 if (olh == NULL)
2629 return (EINVAL);
2630 if (sd->valsize < olh->size)
2631 return (EINVAL);
2632
2633 CTL3_LOCK();
2634 count = ctl3_hsize;
2635 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
2636
2637	/* Fill in header regardless of buffer size */
2638 olh->count = count;
2639 olh->objsize = sizeof(ipfw_sopt_info);
2640
2641 if (size > olh->size) {
2642 olh->size = size;
2643 CTL3_UNLOCK();
2644 return (ENOMEM);
2645 }
2646 olh->size = size;
2647
2648 for (n = 1; n <= count; n++) {
2649 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2650 KASSERT(i != 0, ("previously checked buffer is not enough"));
2651 sh = &ctl3_handlers[n];
2652 i->opcode = sh->opcode;
2653 i->version = sh->version;
2654 i->refcnt = sh->refcnt;
2655 }
2656 CTL3_UNLOCK();
2657
2658 return (0);
2659}
2660
2661/*
2662 * Compares two opcodes.
2663 * Used both in qsort() and bsearch().
2664 *
2665 * Returns 0 if match is found.
2666 */
2667static int
2668compare_opcodes(const void *_a, const void *_b)
2669{
2670 const struct opcode_obj_rewrite *a, *b;
2671
2672 a = (const struct opcode_obj_rewrite *)_a;
2673 b = (const struct opcode_obj_rewrite *)_b;
2674
2675 if (a->opcode < b->opcode)
2676 return (-1);
2677 else if (a->opcode > b->opcode)
2678 return (1);
2679
2680 return (0);
2681}
2682
2683/*
2684 * Finds opcode object rewriter based on @code.
2685 *
2686 * Returns pointer to handler or NULL.
2687 */
2688struct opcode_obj_rewrite *
2689ipfw_find_op_rw(uint16_t opcode)
2690{
2691 struct opcode_obj_rewrite *rw, h;
2692
2693 memset(&h, 0, sizeof(h));
2694 h.opcode = opcode;
2695
2696 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
2697 ctl3_rsize, sizeof(h), compare_opcodes);
2698
2699 return (rw);
2700}
2701
2702int
2703classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
2704{
2705 struct opcode_obj_rewrite *rw;
2706 uint8_t subtype;
2707
2708 rw = ipfw_find_op_rw(cmd->opcode);
2709 if (rw == NULL)
2710 return (1);
2711
2712 return (rw->classifier(cmd, puidx, &subtype));
2713}
2714
2715void
2716update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
2717{
2718 struct opcode_obj_rewrite *rw;
2719
2720 rw = ipfw_find_op_rw(cmd->opcode);
2721 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
2722 rw->update(cmd, idx);
2723}
2724
2725void
2726ipfw_init_obj_rewriter()
2727{
2728
2729 ctl3_rewriters = NULL;
2730 ctl3_rsize = 0;
2731}
2732
2733void
2734ipfw_destroy_obj_rewriter()
2735{
2736
2737 if (ctl3_rewriters != NULL)
2738 free(ctl3_rewriters, M_IPFW);
2739 ctl3_rewriters = NULL;
2740 ctl3_rsize = 0;
2741}
2742
2743/*
2744 * Adds one or more opcode object rewrite handlers to the global array.
2745 * Function may sleep.
2746 */
2747void
2748ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
2749{
2750 size_t sz;
2751 struct opcode_obj_rewrite *tmp;
2752
2753 CTL3_LOCK();
2754
2755 for (;;) {
2756 sz = ctl3_rsize + count;
2757 CTL3_UNLOCK();
2758 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
2759 CTL3_LOCK();
2760 if (ctl3_rsize + count <= sz)
2761 break;
2762
2763 /* Retry */
2764 free(tmp, M_IPFW);
2765 }
2766
2767 /* Merge old & new arrays */
2768 sz = ctl3_rsize + count;
2769 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
2770 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
2771 qsort(tmp, sz, sizeof(*rw), compare_opcodes);
2772 /* Switch new and free old */
2773 if (ctl3_rewriters != NULL)
2774 free(ctl3_rewriters, M_IPFW);
2775 ctl3_rewriters = tmp;
2776 ctl3_rsize = sz;
2777
2778 CTL3_UNLOCK();
2779}
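/*
 * Illustrative sketch, not part of this file: a subsystem that stores an
 * object index inside one of its opcodes registers a rewriter roughly like
 * this.  The callback fields (classifier, update, find_byname, find_bykidx,
 * create_object, etlv) are the ones used throughout this file; O_EXAMPLE and
 * all example_* names are hypothetical, and the lookup/creation bodies are
 * elided.
 */
#if 0	/* example only */
static int	example_findbyname(struct ip_fw_chain *, struct tid_info *,
		    struct named_object **);
static struct named_object *example_findbykidx(struct ip_fw_chain *,
		    uint16_t);
static int	example_create(struct ip_fw_chain *, struct tid_info *,
		    uint16_t *);

static int
example_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{

	*puidx = cmd->arg1;	/* this opcode keeps its index in arg1 */
	*ptype = 0;
	return (0);		/* 0: the opcode does reference an object */
}

static void
example_update(ipfw_insn *cmd, uint16_t idx)
{

	cmd->arg1 = idx;	/* store the rewritten index back */
}

static struct opcode_obj_rewrite example_rw[] = {
	{
		.opcode = O_EXAMPLE,		/* hypothetical opcode */
		.etlv = IPFW_TLV_TBL_NAME,
		.classifier = example_classify,
		.update = example_update,
		.find_byname = example_findbyname,
		.find_bykidx = example_findbykidx,
		.create_object = example_create,
	},
};

static void
example_rewriter_attach(void)
{

	ipfw_add_obj_rewriter(example_rw, nitems(example_rw));
}

static void
example_rewriter_detach(void)
{

	ipfw_del_obj_rewriter(example_rw, nitems(example_rw));
}
#endif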
2780
2781/*
2782 * Removes one or more object rewrite handlers from the global array.
2783 */
2784int
2785ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
2786{
2787 size_t sz;
2788 struct opcode_obj_rewrite *tmp, *h;
2789 int i;
2790
2791 CTL3_LOCK();
2792
2793 for (i = 0; i < count; i++) {
2794 tmp = &rw[i];
2795 h = ipfw_find_op_rw(tmp->opcode);
2796 if (h == NULL)
2797 continue;
2798
2799 sz = (ctl3_rewriters + ctl3_rsize - (h + 1)) * sizeof(*h);
2800 memmove(h, h + 1, sz);
2801 ctl3_rsize--;
2802 }
2803
2804 if (ctl3_rsize == 0) {
2805 if (ctl3_rewriters != NULL)
2806 free(ctl3_rewriters, M_IPFW);
2807 ctl3_rewriters = NULL;
2808 }
2809
2810 CTL3_UNLOCK();
2811
2812 return (0);
2813}
2814
2815static void
2816export_objhash_ntlv_internal(struct namedobj_instance *ni,
2817 struct named_object *no, void *arg)
2818{
2819 struct sockopt_data *sd;
2820 ipfw_obj_ntlv *ntlv;
2821
2822 sd = (struct sockopt_data *)arg;
2823 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2824 if (ntlv == NULL)
2825 return;
2826 ipfw_export_obj_ntlv(no, ntlv);
2827}
2828
2829/*
2830 * Lists all service objects.
2831 * Data layout (v0)(current):
2832 * Request: [ ipfw_obj_lheader ] size = ipfw_cfg_lheader.size
2833 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
2834 * Returns 0 on success
2835 */
2836static int
2837dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2838 struct sockopt_data *sd)
2839{
2840 ipfw_obj_lheader *hdr;
2841 int count;
2842
2843 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2844 if (hdr == NULL)
2845 return (EINVAL);
2846
2847 IPFW_UH_RLOCK(chain);
2848 count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
2849 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
2850 if (sd->valsize < hdr->size) {
2851 IPFW_UH_RUNLOCK(chain);
2852 return (ENOMEM);
2853 }
2854 hdr->count = count;
2855 hdr->objsize = sizeof(ipfw_obj_ntlv);
2856 if (count > 0)
2857 ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
2858 export_objhash_ntlv_internal, sd);
2859 IPFW_UH_RUNLOCK(chain);
2860 return (0);
2861}
2862
2863/*
2864 * Compares two sopt handlers (code, version and handler ptr).
2865 * Used both in qsort() and bsearch().
2866 * Does not compare the handler in the latter case.
2867 *
2868 * Returns 0 if match is found.
2869 */
2870static int
2871compare_sh(const void *_a, const void *_b)
2872{
2873 const struct ipfw_sopt_handler *a, *b;
2874
2875 a = (const struct ipfw_sopt_handler *)_a;
2876 b = (const struct ipfw_sopt_handler *)_b;
2877
2878 if (a->opcode < b->opcode)
2879 return (-1);
2880 else if (a->opcode > b->opcode)
2881 return (1);
2882
2883 if (a->version < b->version)
2884 return (-1);
2885 else if (a->version > b->version)
2886 return (1);
2887
2888 /* bsearch helper */
2889 if (a->handler == NULL)
2890 return (0);
2891
2892 if ((uintptr_t)a->handler < (uintptr_t)b->handler)
2893 return (-1);
2894	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
2895 return (1);
2896
2897 return (0);
2898}
2899
2900/*
2901 * Finds sopt handler based on @code and @version.
2902 *
2903 * Returns pointer to handler or NULL.
2904 */
2905static struct ipfw_sopt_handler *
2906find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
2907{
2908 struct ipfw_sopt_handler *sh, h;
2909
2910 memset(&h, 0, sizeof(h));
2911 h.opcode = code;
2912 h.version = version;
2913 h.handler = handler;
2914
2915 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
2916 ctl3_hsize, sizeof(h), compare_sh);
2917
2918 return (sh);
2919}
2920
2921static int
2922find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
2923{
2924 struct ipfw_sopt_handler *sh;
2925
2926 CTL3_LOCK();
2927 if ((sh = find_sh(opcode, version, NULL)) == NULL) {
2928 CTL3_UNLOCK();
2929 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
2930 opcode, version);
2931 return (EINVAL);
2932 }
2933 sh->refcnt++;
2934 ctl3_refct++;
2935 /* Copy handler data to requested buffer */
2936 *psh = *sh;
2937 CTL3_UNLOCK();
2938
2939 return (0);
2940}
2941
2942static void
2943find_unref_sh(struct ipfw_sopt_handler *psh)
2944{
2945 struct ipfw_sopt_handler *sh;
2946
2947 CTL3_LOCK();
2948 sh = find_sh(psh->opcode, psh->version, NULL);
2949 KASSERT(sh != NULL, ("ctl3 handler disappeared"));
2950 sh->refcnt--;
2951 ctl3_refct--;
2952 CTL3_UNLOCK();
2953}
2954
2955void
2956ipfw_init_sopt_handler()
2957{
2958
2959 CTL3_LOCK_INIT();
2960 IPFW_ADD_SOPT_HANDLER(1, scodes);
2961}
2962
2963void
2964ipfw_destroy_sopt_handler()
2965{
2966
2967 IPFW_DEL_SOPT_HANDLER(1, scodes);
2968 CTL3_LOCK_DESTROY();
2969}
2970
2971/*
2972 * Adds one or more sockopt handlers to the global array.
2973 * Function may sleep.
2974 */
2975void
2976ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
2977{
2978 size_t sz;
2979 struct ipfw_sopt_handler *tmp;
2980
2981 CTL3_LOCK();
2982
2983 for (;;) {
2984 sz = ctl3_hsize + count;
2985 CTL3_UNLOCK();
2986 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
2987 CTL3_LOCK();
2988 if (ctl3_hsize + count <= sz)
2989 break;
2990
2991 /* Retry */
2992 free(tmp, M_IPFW);
2993 }
2994
2995 /* Merge old & new arrays */
2996 sz = ctl3_hsize + count;
2997 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
2998 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
2999 qsort(tmp, sz, sizeof(*sh), compare_sh);
3000 /* Switch new and free old */
3001 if (ctl3_handlers != NULL)
3002 free(ctl3_handlers, M_IPFW);
3003 ctl3_handlers = tmp;
3004 ctl3_hsize = sz;
3005 ctl3_gencnt++;
3006
3007 CTL3_UNLOCK();
3008}
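/*
 * Illustrative sketch, not part of this file: registering an IP_FW3
 * sub-option handler.  The handler signature matches how h.handler is
 * invoked from ipfw_ctl3() below; IP_FW_EXAMPLE is hypothetical and HDIR_GET
 * is assumed to be the "get" counterpart of the HDIR_SET flag tested below.
 */
#if 0	/* example only */
static int
example_handler(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{

	/* Parse the request from @sd, emit the reply via ipfw_get_sopt_*(). */
	return (0);
}

static struct ipfw_sopt_handler example_scodes[] = {
	{ .opcode = IP_FW_EXAMPLE, .version = 0, .dir = HDIR_GET,
	  .handler = example_handler },
};

static void
example_sopt_init(void)
{

	IPFW_ADD_SOPT_HANDLER(1, example_scodes);
}

static void
example_sopt_destroy(void)
{

	IPFW_DEL_SOPT_HANDLER(1, example_scodes);
}
#endif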
3009
3010/*
3011 * Removes one or more sockopt handlers from the global array.
3012 */
3013int
3014ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3015{
3016 size_t sz;
3017 struct ipfw_sopt_handler *tmp, *h;
3018 int i;
3019
3020 CTL3_LOCK();
3021
3022 for (i = 0; i < count; i++) {
3023 tmp = &sh[i];
3024 h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3025 if (h == NULL)
3026 continue;
3027
3028 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3029 memmove(h, h + 1, sz);
3030 ctl3_hsize--;
3031 }
3032
3033 if (ctl3_hsize == 0) {
3034 if (ctl3_handlers != NULL)
3035 free(ctl3_handlers, M_IPFW);
3036 ctl3_handlers = NULL;
3037 }
3038
3039 ctl3_gencnt++;
3040
3041 CTL3_UNLOCK();
3042
3043 return (0);
3044}
3045
3046/*
3047 * Writes data accumulated in @sd to sockopt buffer.
3048 * Zeroes internal @sd buffer.
3049 */
3050static int
3051ipfw_flush_sopt_data(struct sockopt_data *sd)
3052{
3053 struct sockopt *sopt;
3054 int error;
3055 size_t sz;
3056
3057 sz = sd->koff;
3058 if (sz == 0)
3059 return (0);
3060
3061 sopt = sd->sopt;
3062
3063 if (sopt->sopt_dir == SOPT_GET) {
3064 error = copyout(sd->kbuf, sopt->sopt_val, sz);
3065 if (error != 0)
3066 return (error);
3067 }
3068
3069 memset(sd->kbuf, 0, sd->ksize);
3070 sd->ktotal += sz;
3071 sd->koff = 0;
3072 if (sd->ktotal + sd->ksize < sd->valsize)
3073 sd->kavail = sd->ksize;
3074 else
3075 sd->kavail = sd->valsize - sd->ktotal;
3076
3077 /* Update sopt buffer data */
3078 sopt->sopt_valsize = sd->ktotal;
3079 sopt->sopt_val = sd->sopt_val + sd->ktotal;
3080
3081 return (0);
3082}
3083
3084/*
3085 * Ensures that the @sd buffer has a contiguous @needed number of
3086 * bytes.
3087 *
3088 * Returns pointer to requested space or NULL.
3089 */
3090caddr_t
3091ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3092{
3093 int error;
3094 caddr_t addr;
3095
3096 if (sd->kavail < needed) {
3097 /*
3098 * Flush data and try another time.
3099 */
3100 error = ipfw_flush_sopt_data(sd);
3101
3102 if (sd->kavail < needed || error != 0)
3103 return (NULL);
3104 }
3105
3106 addr = sd->kbuf + sd->koff;
3107 sd->koff += needed;
3108 sd->kavail -= needed;
3109 return (addr);
3110}
3111
3112/*
3113 * Requests @needed contiguous bytes from the @sd buffer.
3114 * Function is used to notify the subsystem that we are
3115 * interested in the first @needed bytes (the request header)
3116 * and the rest of the buffer can be safely zeroed.
3117 *
3118 * Returns pointer to requested space or NULL.
3119 */
3120caddr_t
3121ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3122{
3123 caddr_t addr;
3124
3125 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3126 return (NULL);
3127
3128 if (sd->kavail > 0)
3129 memset(sd->kbuf + sd->koff, 0, sd->kavail);
3130
3131 return (addr);
3132}
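/*
 * Illustrative sketch, not part of this file: the usual shape of a "get"
 * handler on top of the sliding-window buffer -- read the request header
 * once with ipfw_get_sopt_header(), then emit reply records one by one with
 * ipfw_get_sopt_space(), which flushes the window to userland as it fills
 * (compare dump_srvobjects() above).  struct example_obj and example_count()
 * are hypothetical.
 */
#if 0	/* example only */
struct example_obj {		/* hypothetical reply record */
	uint32_t	value;
};
uint32_t example_count(struct ip_fw_chain *);

static int
example_dump(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *olh;
	struct example_obj *obj;
	uint32_t i, count;

	olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);

	count = example_count(chain);
	olh->count = count;
	olh->objsize = sizeof(*obj);
	olh->size = sizeof(*olh) + count * sizeof(*obj);
	if (sd->valsize < olh->size)
		return (ENOMEM);	/* caller retries with olh->size bytes */

	for (i = 0; i < count; i++) {
		obj = (struct example_obj *)ipfw_get_sopt_space(sd,
		    sizeof(*obj));
		if (obj == NULL)
			return (ENOMEM);
		obj->value = i;		/* fill in the real record here */
	}
	return (0);
}
#endif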
3133
3134/*
3135 * New sockopt handler.
3136 */
3137int
3138ipfw_ctl3(struct sockopt *sopt)
3139{
3140 int error, locked;
3141 size_t size, valsize;
3142 struct ip_fw_chain *chain;
3143 char xbuf[256];
3144 struct sockopt_data sdata;
3145 struct ipfw_sopt_handler h;
3146 ip_fw3_opheader *op3 = NULL;
3147
3148 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3149 if (error != 0)
3150 return (error);
3151
3152 if (sopt->sopt_name != IP_FW3)
3153 return (ipfw_ctl(sopt));
3154
3155 chain = &V_layer3_chain;
3156 error = 0;
3157
3158 /* Save original valsize before it is altered via sooptcopyin() */
3159 valsize = sopt->sopt_valsize;
3160 memset(&sdata, 0, sizeof(sdata));
3161 /* Read op3 header first to determine actual operation */
3162 op3 = (ip_fw3_opheader *)xbuf;
3163 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3164 if (error != 0)
3165 return (error);
3166 sopt->sopt_valsize = valsize;
3167
3168 /*
3169 * Find and reference command.
3170 */
3171 error = find_ref_sh(op3->opcode, op3->version, &h);
3172 if (error != 0)
3173 return (error);
3174
3175 /*
3176 * Disallow modifications in really-really secure mode, but still allow
3177 * the logging counters to be reset.
3178 */
3179 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3180 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3181 if (error != 0) {
3182 find_unref_sh(&h);
3183 return (error);
3184 }
3185 }
3186
3187 /*
3188 * Fill in sockopt_data structure that may be useful for
3189 * IP_FW3 get requests.
3190 */
3191 locked = 0;
3192 if (valsize <= sizeof(xbuf)) {
3193 /* use on-stack buffer */
3194 sdata.kbuf = xbuf;
3195 sdata.ksize = sizeof(xbuf);
3196 sdata.kavail = valsize;
3197 } else {
3198
3199 /*
3200 * Determine opcode type/buffer size:
3201 * allocate sliding-window buf for data export or
3202		 * contiguous buffer for special ops.
3203 */
3204 if ((h.dir & HDIR_SET) != 0) {
3205			/* Set request. Allocate contiguous buffer. */
3206 if (valsize > CTL3_LARGEBUF) {
3207 find_unref_sh(&h);
3208 return (EFBIG);
3209 }
3210
3211 size = valsize;
3212 } else {
3213 /* Get request. Allocate sliding window buffer */
3214 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3215
3216 if (size < valsize) {
3217 /* We have to wire user buffer */
3218 error = vslock(sopt->sopt_val, valsize);
3219 if (error != 0)
3220 return (error);
3221 locked = 1;
3222 }
3223 }
3224
3225 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3226 sdata.ksize = size;
3227 sdata.kavail = size;
3228 }
3229
3230 sdata.sopt = sopt;
3231 sdata.sopt_val = sopt->sopt_val;
3232 sdata.valsize = valsize;
3233
3234 /*
3235	 * Copy either the whole request (if valsize < bsize_max)
3236	 * or the first bsize_max bytes, to guarantee that most
3237	 * consumers get all the data they need.
3238	 * In any case, copy not less than sizeof(ip_fw3_opheader).
3239 */
3240 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3241 sizeof(ip_fw3_opheader))) != 0)
3242 return (error);
3243 op3 = (ip_fw3_opheader *)sdata.kbuf;
3244
3245 /* Finally, run handler */
3246 error = h.handler(chain, op3, &sdata);
3247 find_unref_sh(&h);
3248
3249 /* Flush state and free buffers */
3250 if (error == 0)
3251 error = ipfw_flush_sopt_data(&sdata);
3252 else
3253 ipfw_flush_sopt_data(&sdata);
3254
3255 if (locked != 0)
3256 vsunlock(sdata.sopt_val, valsize);
3257
3258 /* Restore original pointer and set number of bytes written */
3259 sopt->sopt_val = sdata.sopt_val;
3260 sopt->sopt_valsize = sdata.ktotal;
3261 if (sdata.kbuf != xbuf)
3262 free(sdata.kbuf, M_TEMP);
3263
3264 return (error);
3265}
3266
3267/**
3268 * {set|get}sockopt parser.
3269 */
3270int
3271ipfw_ctl(struct sockopt *sopt)
3272{
3273#define RULE_MAXSIZE (512*sizeof(u_int32_t))
3274 int error;
3275 size_t size, valsize;
3276 struct ip_fw *buf;
3277 struct ip_fw_rule0 *rule;
3278 struct ip_fw_chain *chain;
3279 u_int32_t rulenum[2];
3280 uint32_t opt;
3281 struct rule_check_info ci;
3282 IPFW_RLOCK_TRACKER;
3283
3284 chain = &V_layer3_chain;
3285 error = 0;
3286
3287 /* Save original valsize before it is altered via sooptcopyin() */
3288 valsize = sopt->sopt_valsize;
3289 opt = sopt->sopt_name;
3290
3291 /*
3292 * Disallow modifications in really-really secure mode, but still allow
3293 * the logging counters to be reset.
3294 */
3295 if (opt == IP_FW_ADD ||
3296 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3297 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3298 if (error != 0)
3299 return (error);
3300 }
3301
3302 switch (opt) {
3303 case IP_FW_GET:
3304 /*
3305 * pass up a copy of the current rules. Static rules
3306 * come first (the last of which has number IPFW_DEFAULT_RULE),
3307		 * followed by a possibly empty list of dynamic rules.
3308 * The last dynamic rule has NULL in the "next" field.
3309 *
3310 * Note that the calculated size is used to bound the
3311 * amount of data returned to the user. The rule set may
3312 * change between calculating the size and returning the
3313 * data in which case we'll just return what fits.
3314 */
3315 for (;;) {
3316 int len = 0, want;
3317
3318 size = chain->static_len;
3319 size += ipfw_dyn_len();
3320 if (size >= sopt->sopt_valsize)
3321 break;
3322 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3323 IPFW_UH_RLOCK(chain);
3324 /* check again how much space we need */
3325 want = chain->static_len + ipfw_dyn_len();
3326 if (size >= want)
3327 len = ipfw_getrules(chain, buf, size);
3328 IPFW_UH_RUNLOCK(chain);
3329 if (size >= want)
3330 error = sooptcopyout(sopt, buf, len);
3331 free(buf, M_TEMP);
3332 if (size >= want)
3333 break;
3334 }
3335 break;
3336
3337 case IP_FW_FLUSH:
3338 /* locking is done within del_entry() */
3339 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3340 break;
3341
3342 case IP_FW_ADD:
3343 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3344 error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3345 sizeof(struct ip_fw7) );
3346
3347 memset(&ci, 0, sizeof(struct rule_check_info));
3348
3349 /*
3350 * If the size of commands equals RULESIZE7 then we assume
3351 * a FreeBSD7.2 binary is talking to us (set is7=1).
3352 * is7 is persistent so the next 'ipfw list' command
3353 * will use this format.
3354		 * NOTE: If the wrong version is guessed (this can happen if
3355		 * the first ipfw command is 'ipfw [pipe] list')
3356		 * the ipfw binary may crash or loop infinitely...
3357 */
3358 size = sopt->sopt_valsize;
3359 if (size == RULESIZE7(rule)) {
3360 is7 = 1;
3361 error = convert_rule_to_8(rule);
3362 if (error) {
3363 free(rule, M_TEMP);
3364 return error;
3365 }
3366 size = RULESIZE(rule);
3367 } else
3368 is7 = 0;
3369 if (error == 0)
3370 error = check_ipfw_rule0(rule, size, &ci);
3371 if (error == 0) {
3372 /* locking is done within add_rule() */
3373 struct ip_fw *krule;
3374 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3375 ci.urule = (caddr_t)rule;
3376 ci.krule = krule;
3377 import_rule0(&ci);
3378 error = commit_rules(chain, &ci, 1);
3379 if (!error && sopt->sopt_dir == SOPT_GET) {
3380 if (is7) {
3381 error = convert_rule_to_7(rule);
3382 size = RULESIZE7(rule);
3383 if (error) {
3384 free(rule, M_TEMP);
3385 return error;
3386 }
3387 }
3388 error = sooptcopyout(sopt, rule, size);
3389 }
3390 }
3391 free(rule, M_TEMP);
3392 break;
3393
3394 case IP_FW_DEL:
3395 /*
3396 * IP_FW_DEL is used for deleting single rules or sets,
3397 * and (ab)used to atomically manipulate sets. Argument size
3398 * is used to distinguish between the two:
3399 * sizeof(u_int32_t)
3400 * delete single rule or set of rules,
3401 * or reassign rules (or sets) to a different set.
3402 * 2*sizeof(u_int32_t)
3403 * atomic disable/enable sets.
3404 * first u_int32_t contains sets to be disabled,
3405 * second u_int32_t contains sets to be enabled.
3406 */
3407 error = sooptcopyin(sopt, rulenum,
3408 2*sizeof(u_int32_t), sizeof(u_int32_t));
3409 if (error)
3410 break;
3411 size = sopt->sopt_valsize;
3412 if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3413 /* delete or reassign, locking done in del_entry() */
3414 error = del_entry(chain, rulenum[0]);
3415 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3416 IPFW_UH_WLOCK(chain);
3417 V_set_disable =
3418 (V_set_disable | rulenum[0]) & ~rulenum[1] &
3419 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3420 IPFW_UH_WUNLOCK(chain);
3421 } else
3422 error = EINVAL;
3423 break;
3424
3425 case IP_FW_ZERO:
3426 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */
3427 rulenum[0] = 0;
3428 if (sopt->sopt_val != 0) {
3429 error = sooptcopyin(sopt, rulenum,
3430 sizeof(u_int32_t), sizeof(u_int32_t));
3431 if (error)
3432 break;
3433 }
3434 error = zero_entry(chain, rulenum[0],
3435 sopt->sopt_name == IP_FW_RESETLOG);
3436 break;
3437
3438 /*--- TABLE opcodes ---*/
3439 case IP_FW_TABLE_ADD:
3440 case IP_FW_TABLE_DEL:
3441 {
3442 ipfw_table_entry ent;
3443 struct tentry_info tei;
3444 struct tid_info ti;
3445 struct table_value v;
3446
3447 error = sooptcopyin(sopt, &ent,
3448 sizeof(ent), sizeof(ent));
3449 if (error)
3450 break;
3451
3452 memset(&tei, 0, sizeof(tei));
3453 tei.paddr = &ent.addr;
3454 tei.subtype = AF_INET;
3455 tei.masklen = ent.masklen;
3456 ipfw_import_table_value_legacy(ent.value, &v);
3457 tei.pvalue = &v;
3458 memset(&ti, 0, sizeof(ti));
3459 ti.uidx = ent.tbl;
3460 ti.type = IPFW_TABLE_CIDR;
3461
3462 error = (opt == IP_FW_TABLE_ADD) ?
3463 add_table_entry(chain, &ti, &tei, 0, 1) :
3464 del_table_entry(chain, &ti, &tei, 0, 1);
3465 }
3466 break;
3467
3468
3469 case IP_FW_TABLE_FLUSH:
3470 {
3471 u_int16_t tbl;
3472 struct tid_info ti;
3473
3474 error = sooptcopyin(sopt, &tbl,
3475 sizeof(tbl), sizeof(tbl));
3476 if (error)
3477 break;
3478 memset(&ti, 0, sizeof(ti));
3479 ti.uidx = tbl;
3480 error = flush_table(chain, &ti);
3481 }
3482 break;
3483
3484 case IP_FW_TABLE_GETSIZE:
3485 {
3486 u_int32_t tbl, cnt;
3487 struct tid_info ti;
3488
3489 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
3490 sizeof(tbl))))
3491 break;
3492 memset(&ti, 0, sizeof(ti));
3493 ti.uidx = tbl;
3494 IPFW_RLOCK(chain);
3495 error = ipfw_count_table(chain, &ti, &cnt);
3496 IPFW_RUNLOCK(chain);
3497 if (error)
3498 break;
3499 error = sooptcopyout(sopt, &cnt, sizeof(cnt));
3500 }
3501 break;
3502
3503 case IP_FW_TABLE_LIST:
3504 {
3505 ipfw_table *tbl;
3506 struct tid_info ti;
3507
3508 if (sopt->sopt_valsize < sizeof(*tbl)) {
3509 error = EINVAL;
3510 break;
3511 }
3512 size = sopt->sopt_valsize;
3513 tbl = malloc(size, M_TEMP, M_WAITOK);
3514 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
3515 if (error) {
3516 free(tbl, M_TEMP);
3517 break;
3518 }
3519 tbl->size = (size - sizeof(*tbl)) /
3520 sizeof(ipfw_table_entry);
3521 memset(&ti, 0, sizeof(ti));
3522 ti.uidx = tbl->tbl;
3523 IPFW_RLOCK(chain);
3524 error = ipfw_dump_table_legacy(chain, &ti, tbl);
3525 IPFW_RUNLOCK(chain);
3526 if (error) {
3527 free(tbl, M_TEMP);
3528 break;
3529 }
3530 error = sooptcopyout(sopt, tbl, size);
3531 free(tbl, M_TEMP);
3532 }
3533 break;
3534
3535 /*--- NAT operations are protected by the IPFW_LOCK ---*/
3536 case IP_FW_NAT_CFG:
3537 if (IPFW_NAT_LOADED)
3538 error = ipfw_nat_cfg_ptr(sopt);
3539 else {
3540 printf("IP_FW_NAT_CFG: %s\n",
3541 "ipfw_nat not present, please load it");
3542 error = EINVAL;
3543 }
3544 break;
3545
3546 case IP_FW_NAT_DEL:
3547 if (IPFW_NAT_LOADED)
3548 error = ipfw_nat_del_ptr(sopt);
3549 else {
3550 printf("IP_FW_NAT_DEL: %s\n",
3551 "ipfw_nat not present, please load it");
3552 error = EINVAL;
3553 }
3554 break;
3555
3556 case IP_FW_NAT_GET_CONFIG:
3557 if (IPFW_NAT_LOADED)
3558 error = ipfw_nat_get_cfg_ptr(sopt);
3559 else {
3560 printf("IP_FW_NAT_GET_CFG: %s\n",
3561 "ipfw_nat not present, please load it");
3562 error = EINVAL;
3563 }
3564 break;
3565
3566 case IP_FW_NAT_GET_LOG:
3567 if (IPFW_NAT_LOADED)
3568 error = ipfw_nat_get_log_ptr(sopt);
3569 else {
3570 printf("IP_FW_NAT_GET_LOG: %s\n",
3571 "ipfw_nat not present, please load it");
3572 error = EINVAL;
3573 }
3574 break;
3575
3576 default:
3577 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
3578 error = EINVAL;
3579 }
3580
3581 return (error);
3582#undef RULE_MAXSIZE
3583}
3584#define RULE_MAXSIZE (256*sizeof(u_int32_t))
3585
3586/* Functions to convert rules 7.2 <==> 8.0 */
3587static int
3588convert_rule_to_7(struct ip_fw_rule0 *rule)
3589{
3590 /* Used to modify original rule */
3591 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
3592 /* copy of original rule, version 8 */
3593 struct ip_fw_rule0 *tmp;
3594
3595 /* Used to copy commands */
3596 ipfw_insn *ccmd, *dst;
3597 int ll = 0, ccmdlen = 0;
3598
3599 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
3600 if (tmp == NULL) {
3601 return 1; //XXX error
3602 }
3603 bcopy(rule, tmp, RULE_MAXSIZE);
3604
3605 /* Copy fields */
3606 //rule7->_pad = tmp->_pad;
3607 rule7->set = tmp->set;
3608 rule7->rulenum = tmp->rulenum;
3609 rule7->cmd_len = tmp->cmd_len;
3610 rule7->act_ofs = tmp->act_ofs;
3611 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
3612 rule7->cmd_len = tmp->cmd_len;
3613 rule7->pcnt = tmp->pcnt;
3614 rule7->bcnt = tmp->bcnt;
3615 rule7->timestamp = tmp->timestamp;
3616
3617 /* Copy commands */
3618 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
3619 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3620 ccmdlen = F_LEN(ccmd);
3621
3622 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3623
3624 if (dst->opcode > O_NAT)
3625			/* O_REASS doesn't exist in the 7.2 version, so
3626			 * decrement the opcode if it is after O_REASS
3627			 */
3628 dst->opcode--;
3629
3630 if (ccmdlen > ll) {
3631 printf("ipfw: opcode %d size truncated\n",
3632 ccmd->opcode);
3633 return EINVAL;
3634 }
3635 }
3636 free(tmp, M_TEMP);
3637
3638 return 0;
3639}
3640
3641static int
3642convert_rule_to_8(struct ip_fw_rule0 *rule)
3643{
3644 /* Used to modify original rule */
3645 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
3646
3647 /* Used to copy commands */
3648 ipfw_insn *ccmd, *dst;
3649 int ll = 0, ccmdlen = 0;
3650
3651 /* Copy of original rule */
3652 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
3653 if (tmp == NULL) {
3654 return 1; //XXX error
3655 }
3656
3657 bcopy(rule7, tmp, RULE_MAXSIZE);
3658
3659 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
3660 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3661 ccmdlen = F_LEN(ccmd);
3662
3663 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3664
3665 if (dst->opcode > O_NAT)
3666			/* O_REASS doesn't exist in the 7.2 version, so
3667			 * increment the opcode if it is after O_REASS
3668			 */
3669 dst->opcode++;
3670
3671 if (ccmdlen > ll) {
3672 printf("ipfw: opcode %d size truncated\n",
3673 ccmd->opcode);
3674 return EINVAL;
3675 }
3676 }
3677
3678 rule->_pad = tmp->_pad;
3679 rule->set = tmp->set;
3680 rule->rulenum = tmp->rulenum;
3681 rule->cmd_len = tmp->cmd_len;
3682 rule->act_ofs = tmp->act_ofs;
3683 rule->next_rule = (struct ip_fw *)tmp->next_rule;
3684 rule->cmd_len = tmp->cmd_len;
3685 rule->id = 0; /* XXX see if is ok = 0 */
3686 rule->pcnt = tmp->pcnt;
3687 rule->bcnt = tmp->bcnt;
3688 rule->timestamp = tmp->timestamp;
3689
3690 free (tmp, M_TEMP);
3691 return 0;
3692}
3693
3694/*
3695 * Named object API
3696 *
3697 */
3698
3699void
3700ipfw_init_srv(struct ip_fw_chain *ch)
3701{
3702
3703 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
3704 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
3705 M_IPFW, M_WAITOK | M_ZERO);
3706}
3707
3708void
3709ipfw_destroy_srv(struct ip_fw_chain *ch)
3710{
3711
3712 free(ch->srvstate, M_IPFW);
3713 ipfw_objhash_destroy(ch->srvmap);
3714}
3715
3716/*
3717 * Allocate new bitmask which can be used to enlarge/shrink
3718 * named instance index.
3719 */
3720void
3721ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
3722{
3723 size_t size;
3724 int max_blocks;
3725 u_long *idx_mask;
3726
3727 KASSERT((items % BLOCK_ITEMS) == 0,
3728	    ("bitmask size needs to be a power of 2 and greater or equal to %zu",
3729 BLOCK_ITEMS));
3730
3731 max_blocks = items / BLOCK_ITEMS;
3732 size = items / 8;
3733 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
3734 /* Mark all as free */
3735 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
3736 *idx_mask &= ~(u_long)1; /* Skip index 0 */
3737
3738 *idx = idx_mask;
3739 *pblocks = max_blocks;
3740}
3741
3742/*
3743 * Copy current bitmask index to new one.
3744 */
3745void
3746ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
3747{
3748 int old_blocks, new_blocks;
3749 u_long *old_idx, *new_idx;
3750 int i;
3751
3752 old_idx = ni->idx_mask;
3753 old_blocks = ni->max_blocks;
3754 new_idx = *idx;
3755 new_blocks = *blocks;
3756
3757 for (i = 0; i < IPFW_MAX_SETS; i++) {
3758 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
3759 old_blocks * sizeof(u_long));
3760 }
3761}
3762
3763/*
3764 * Swaps current @ni index with new one.
3765 */
3766void
3767ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
3768{
3769 int old_blocks;
3770 u_long *old_idx;
3771
3772 old_idx = ni->idx_mask;
3773 old_blocks = ni->max_blocks;
3774
3775 ni->idx_mask = *idx;
3776 ni->max_blocks = *blocks;
3777
3778 /* Save old values */
3779 *idx = old_idx;
3780 *blocks = old_blocks;
3781}
3782
3783void
3784ipfw_objhash_bitmap_free(void *idx, int blocks)
3785{
3786
3787 free(idx, M_IPFW);
3788}
3789
3790/*
3791 * Creates named hash instance.
3792 * Must be called without holding any locks.
3793 * Return pointer to new instance.
3794 */
3795struct namedobj_instance *
3796ipfw_objhash_create(uint32_t items)
3797{
3798 struct namedobj_instance *ni;
3799 int i;
3800 size_t size;
3801
3802 size = sizeof(struct namedobj_instance) +
3803 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
3804 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
3805
3806 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
3807 ni->nn_size = NAMEDOBJ_HASH_SIZE;
3808 ni->nv_size = NAMEDOBJ_HASH_SIZE;
3809
3810 ni->names = (struct namedobjects_head *)(ni +1);
3811 ni->values = &ni->names[ni->nn_size];
3812
3813 for (i = 0; i < ni->nn_size; i++)
3814 TAILQ_INIT(&ni->names[i]);
3815
3816 for (i = 0; i < ni->nv_size; i++)
3817 TAILQ_INIT(&ni->values[i]);
3818
3819 /* Set default hashing/comparison functions */
3820 ni->hash_f = objhash_hash_name;
3821 ni->cmp_f = objhash_cmp_name;
3822
3823 /* Allocate bitmask separately due to possible resize */
3824 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
3825
3826 return (ni);
3827}
3828
3829void
3830ipfw_objhash_destroy(struct namedobj_instance *ni)
3831{
3832
3833 free(ni->idx_mask, M_IPFW);
3834 free(ni, M_IPFW);
3835}
3836
3837void
3838ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
3839 objhash_cmp_f *cmp_f)
3840{
3841
3842 ni->hash_f = hash_f;
3843 ni->cmp_f = cmp_f;
3844}
3845
3846static uint32_t
3847objhash_hash_name(struct namedobj_instance *ni, void *name, uint32_t set)
3848{
3849
3850 return (fnv_32_str((char *)name, FNV1_32_INIT));
3851}
3852
3853static int
3854objhash_cmp_name(struct named_object *no, void *name, uint32_t set)
3855{
3856
3857 if ((strcmp(no->name, (char *)name) == 0) && (no->set == set))
3858 return (0);
3859
3860 return (1);
3861}
3862
3863static uint32_t
3864objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
3865{
3866 uint32_t v;
3867
3868 v = val % (ni->nv_size - 1);
3869
3870 return (v);
3871}
3872
3873struct named_object *
3874ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
3875{
3876 struct named_object *no;
3877 uint32_t hash;
3878
3879 hash = ni->hash_f(ni, name, set) % ni->nn_size;
3880
3881 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3882 if (ni->cmp_f(no, name, set) == 0)
3883 return (no);
3884 }
3885
3886 return (NULL);
3887}
3888
3889/*
3890 * Find named object by name, considering also its TLV type.
3891 */
3892struct named_object *
3893ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
3894 uint32_t type, char *name)
3895{
3896 struct named_object *no;
3897 uint32_t hash;
3898
3899 hash = ni->hash_f(ni, name, set) % ni->nn_size;
3900
3901 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3902 if (ni->cmp_f(no, name, set) == 0 && no->etlv == type)
3903 return (no);
3904 }
3905
3906 return (NULL);
3907}
3908
3909struct named_object *
3910ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
3911{
3912 struct named_object *no;
3913 uint32_t hash;
3914
3915 hash = objhash_hash_idx(ni, kidx);
3916
3917 TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
3918 if (no->kidx == kidx)
3919 return (no);
3920 }
3921
3922 return (NULL);
3923}
3924
3925int
3926ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
3927 struct named_object *b)
3928{
3929
3930 if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
3931 return (1);
3932
3933 return (0);
3934}
3935
3936void
3937ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
3938{
3939 uint32_t hash;
3940
3941 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3942 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
3943
3944 hash = objhash_hash_idx(ni, no->kidx);
3945 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
3946
3947 ni->count++;
3948}
3949
3950void
3951ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
3952{
3953 uint32_t hash;
3954
3955 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3956 TAILQ_REMOVE(&ni->names[hash], no, nn_next);
3957
3958 hash = objhash_hash_idx(ni, no->kidx);
3959 TAILQ_REMOVE(&ni->values[hash], no, nv_next);
3960
3961 ni->count--;
3962}
3963
3964uint32_t
3965ipfw_objhash_count(struct namedobj_instance *ni)
3966{
3967
3968 return (ni->count);
3969}
3970
3971/*
3972 * Runs @f for each found named object.
3973 * It is safe to delete objects from the callback.
3974 */
3975void
3976ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
3977{
3978 struct named_object *no, *no_tmp;
3979 int i;
3980
3981 for (i = 0; i < ni->nn_size; i++) {
3982 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp)
3983 f(ni, no, arg);
3984 }
3985}
3986
3987/*
3988 * Removes index from given set.
3989 * Returns 0 on success.
3990 */
3991int
3992ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
3993{
3994 u_long *mask;
3995 int i, v;
3996
3997 i = idx / BLOCK_ITEMS;
3998 v = idx % BLOCK_ITEMS;
3999
4000 if (i >= ni->max_blocks)
4001 return (1);
4002
4003 mask = &ni->idx_mask[i];
4004
4005 if ((*mask & ((u_long)1 << v)) != 0)
4006 return (1);
4007
4008 /* Mark as free */
4009 *mask |= (u_long)1 << v;
4010
4011 /* Update free offset */
4012 if (ni->free_off[0] > i)
4013 ni->free_off[0] = i;
4014
4015 return (0);
4016}
4017
4018/*
4019 * Allocates a new index in the given instance and stores it in @pidx.
4020 * Returns 0 on success.
4021 */
4022int
4023ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4024{
4025 struct namedobj_instance *ni;
4026 u_long *mask;
4027 int i, off, v;
4028
4029 ni = (struct namedobj_instance *)n;
4030
4031 off = ni->free_off[0];
4032 mask = &ni->idx_mask[off];
4033
4034 for (i = off; i < ni->max_blocks; i++, mask++) {
4035 if ((v = ffsl(*mask)) == 0)
4036 continue;
4037
4038 /* Mark as busy */
4039 *mask &= ~ ((u_long)1 << (v - 1));
4040
4041 ni->free_off[0] = i;
4042
4043 v = BLOCK_ITEMS * i + v - 1;
4044
4045 *pidx = v;
4046 return (0);
4047 }
4048
4049 return (1);
4050}
4051
4052/* end of file */
2484 if (error != 0)
2485 return (error);
2486
2487 if (ntlv->idx <= idx)
2488 return (EINVAL);
2489
2490 idx = ntlv->idx;
2491 count--;
2492 ntlv++;
2493 }
2494
2495 tstate = ctlv;
2496 read += ctlv->head.length;
2497 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2498 }
2499
2500 if (read + sizeof(*ctlv) > sd->valsize)
2501 return (EINVAL);
2502
2503 if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2504 clen = ctlv->head.length;
2505 if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2506 return (EINVAL);
2507 if ((clen % sizeof(uint64_t)) != 0)
2508 return (EINVAL);
2509
2510 /*
2511 * TODO: Permit adding multiple rules at once
2512 */
2513 if (ctlv->count != 1)
2514 return (ENOTSUP);
2515
2516 clen -= sizeof(*ctlv);
2517
2518 if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2519 return (EINVAL);
2520
2521 /* Allocate state for each rule or use stack */
2522 if (ctlv->count == 1) {
2523 memset(&rci, 0, sizeof(struct rule_check_info));
2524 cbuf = &rci;
2525 } else
2526 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2527 M_WAITOK | M_ZERO);
2528 ci = cbuf;
2529
2530 /*
2531 * Check each rule for validity.
2532 * Ensure numbered rules are sorted in ascending order
2533 * and properly aligned.
2534 */
2535 idx = 0;
2536 r = (struct ip_fw_rule *)(ctlv + 1);
2537 count = 0;
2538 error = 0;
2539 while (clen > 0) {
2540 rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2541 if (rsize > clen || ctlv->count <= count) {
2542 error = EINVAL;
2543 break;
2544 }
2545
2546 ci->ctlv = tstate;
2547 error = check_ipfw_rule1(r, rsize, ci);
2548 if (error != 0)
2549 break;
2550
2551 /* Check sorting */
2552 if (r->rulenum != 0 && r->rulenum < idx) {
2553 printf("rulenum %d idx %d\n", r->rulenum, idx);
2554 error = EINVAL;
2555 break;
2556 }
2557 idx = r->rulenum;
2558
2559 ci->urule = (caddr_t)r;
2560
2561 rsize = roundup2(rsize, sizeof(uint64_t));
2562 clen -= rsize;
2563 r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2564 count++;
2565 ci++;
2566 }
2567
2568 if (ctlv->count != count || error != 0) {
2569 if (cbuf != &rci)
2570 free(cbuf, M_TEMP);
2571 return (EINVAL);
2572 }
2573
2574 rtlv = ctlv;
2575 read += ctlv->head.length;
2576 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2577 }
2578
2579 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
2580 if (cbuf != NULL && cbuf != &rci)
2581 free(cbuf, M_TEMP);
2582 return (EINVAL);
2583 }
2584
2585 /*
2586 * Passed rules seem to be valid.
2587 * Allocate storage and try to add them to the chain.
2588 */
2589 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
2590 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
2591 ci->krule = ipfw_alloc_rule(chain, clen);
2592 import_rule1(ci);
2593 }
2594
2595 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
2596 /* Free allocated krules */
2597 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
2598 free(ci->krule, M_IPFW);
2599 }
2600
2601 if (cbuf != NULL && cbuf != &rci)
2602 free(cbuf, M_TEMP);
2603
2604 return (error);
2605}
2606
2607/*
2608 * Lists all sopts currently registered.
2609 * Data layout (v0)(current):
2610 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2611 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
2612 *
2613 * Returns 0 on success
2614 */
2615static int
2616dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2617 struct sockopt_data *sd)
2618{
2619 struct _ipfw_obj_lheader *olh;
2620 ipfw_sopt_info *i;
2621 struct ipfw_sopt_handler *sh;
2622 uint32_t count, n, size;
2623
2624 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2625 if (olh == NULL)
2626 return (EINVAL);
2627 if (sd->valsize < olh->size)
2628 return (EINVAL);
2629
2630 CTL3_LOCK();
2631 count = ctl3_hsize;
2632 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
2633
2634 /* Fill in header regardless of buffer size */
2635 olh->count = count;
2636 olh->objsize = sizeof(ipfw_sopt_info);
2637
2638 if (size > olh->size) {
2639 olh->size = size;
2640 CTL3_UNLOCK();
2641 return (ENOMEM);
2642 }
2643 olh->size = size;
2644
2645 for (n = 0; n < count; n++) {
2646 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2647 KASSERT(i != NULL, ("previously checked buffer is not enough"));
2648 sh = &ctl3_handlers[n];
2649 i->opcode = sh->opcode;
2650 i->version = sh->version;
2651 i->refcnt = sh->refcnt;
2652 }
2653 CTL3_UNLOCK();
2654
2655 return (0);
2656}
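
/*
 * Userland sketch of the two-pass sizing protocol described above:
 * query once with only the list header, then retry with the size the
 * kernel reports.  The opcode name IP_FW_DUMP_SOPTCODES, the opheader
 * field name and the example_* helper are assumptions; socket, errno
 * and stdlib headers are omitted for brevity.
 */
static int
example_dump_soptcodes(int s)
{
	ipfw_obj_lheader olh, *buf;
	socklen_t len;

	memset(&olh, 0, sizeof(olh));
	olh.opheader.opcode = IP_FW_DUMP_SOPTCODES;	/* assumed opcode */
	olh.size = sizeof(olh);
	len = sizeof(olh);
	/* First pass: header only; the kernel stores the needed size in
	 * olh.size and the request fails with ENOMEM if it does not fit. */
	if (getsockopt(s, IPPROTO_IP, IP_FW3, &olh, &len) != 0 &&
	    errno != ENOMEM)
		return (-1);
	if ((buf = calloc(1, olh.size)) == NULL)
		return (-1);
	memcpy(buf, &olh, sizeof(olh));
	len = olh.size;
	/* Second pass: buffer sized as reported; reply is the header
	 * followed by buf->count records of buf->objsize bytes each. */
	if (getsockopt(s, IPPROTO_IP, IP_FW3, buf, &len) != 0) {
		free(buf);
		return (-1);
	}
	free(buf);
	return (0);
}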
2657
2658/*
2659 * Compares two opcodes.
2660 * Used both in qsort() and bsearch().
2661 *
2662 * Returns 0 if match is found.
2663 */
2664static int
2665compare_opcodes(const void *_a, const void *_b)
2666{
2667 const struct opcode_obj_rewrite *a, *b;
2668
2669 a = (const struct opcode_obj_rewrite *)_a;
2670 b = (const struct opcode_obj_rewrite *)_b;
2671
2672 if (a->opcode < b->opcode)
2673 return (-1);
2674 else if (a->opcode > b->opcode)
2675 return (1);
2676
2677 return (0);
2678}
2679
2680/*
2681 * Finds opcode object rewriter based on @code.
2682 *
2683 * Returns pointer to handler or NULL.
2684 */
2685struct opcode_obj_rewrite *
2686ipfw_find_op_rw(uint16_t opcode)
2687{
2688 struct opcode_obj_rewrite *rw, h;
2689
2690 memset(&h, 0, sizeof(h));
2691 h.opcode = opcode;
2692
2693 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
2694 ctl3_rsize, sizeof(h), compare_opcodes);
2695
2696 return (rw);
2697}
2698
2699int
2700classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
2701{
2702 struct opcode_obj_rewrite *rw;
2703 uint8_t subtype;
2704
2705 rw = ipfw_find_op_rw(cmd->opcode);
2706 if (rw == NULL)
2707 return (1);
2708
2709 return (rw->classifier(cmd, puidx, &subtype));
2710}
2711
2712void
2713update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
2714{
2715 struct opcode_obj_rewrite *rw;
2716
2717 rw = ipfw_find_op_rw(cmd->opcode);
2718 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
2719 rw->update(cmd, idx);
2720}
2721
2722void
2723ipfw_init_obj_rewriter()
2724{
2725
2726 ctl3_rewriters = NULL;
2727 ctl3_rsize = 0;
2728}
2729
2730void
2731ipfw_destroy_obj_rewriter()
2732{
2733
2734 if (ctl3_rewriters != NULL)
2735 free(ctl3_rewriters, M_IPFW);
2736 ctl3_rewriters = NULL;
2737 ctl3_rsize = 0;
2738}
2739
2740/*
2741 * Adds one or more opcode object rewrite handlers to the global array.
2742 * Function may sleep.
2743 */
2744void
2745ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
2746{
2747 size_t sz;
2748 struct opcode_obj_rewrite *tmp;
2749
2750 CTL3_LOCK();
2751
2752 for (;;) {
2753 sz = ctl3_rsize + count;
2754 CTL3_UNLOCK();
2755 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
2756 CTL3_LOCK();
2757 if (ctl3_rsize + count <= sz)
2758 break;
2759
2760 /* Retry */
2761 free(tmp, M_IPFW);
2762 }
2763
2764 /* Merge old & new arrays */
2765 sz = ctl3_rsize + count;
2766 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
2767 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
2768 qsort(tmp, sz, sizeof(*rw), compare_opcodes);
2769 /* Switch new and free old */
2770 if (ctl3_rewriters != NULL)
2771 free(ctl3_rewriters, M_IPFW);
2772 ctl3_rewriters = tmp;
2773 ctl3_rsize = sz;
2774
2775 CTL3_UNLOCK();
2776}
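
/*
 * Minimal registration sketch for an opcode rewriter, limited to the
 * callbacks exercised in this file (classifier and update); a real
 * consumer also fills the lookup/create callbacks declared in
 * ip_fw_private.h.  O_EXAMPLE and both example_* functions are
 * hypothetical.
 */
static int
example_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{

	*puidx = cmd->arg1;	/* user-visible index kept in arg1 */
	*ptype = 0;
	return (0);
}

static void
example_update(ipfw_insn *cmd, uint16_t idx)
{

	cmd->arg1 = idx;	/* store the resolved kernel index */
}

static struct opcode_obj_rewrite example_rw[] = {
	{
		.opcode = O_EXAMPLE,		/* hypothetical opcode */
		.classifier = example_classify,
		.update = example_update,
	},
};

/* ipfw_add_obj_rewriter(example_rw, nitems(example_rw)); */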
2777
2778/*
2779 * Removes one or more object rewrite handlers from the global array.
2780 */
2781int
2782ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
2783{
2784 size_t sz;
2785 struct opcode_obj_rewrite *tmp, *h;
2786 int i;
2787
2788 CTL3_LOCK();
2789
2790 for (i = 0; i < count; i++) {
2791 tmp = &rw[i];
2792 h = ipfw_find_op_rw(tmp->opcode);
2793 if (h == NULL)
2794 continue;
2795
2796 sz = (ctl3_rewriters + ctl3_rsize - (h + 1)) * sizeof(*h);
2797 memmove(h, h + 1, sz);
2798 ctl3_rsize--;
2799 }
2800
2801 if (ctl3_rsize == 0) {
2802 if (ctl3_rewriters != NULL)
2803 free(ctl3_rewriters, M_IPFW);
2804 ctl3_rewriters = NULL;
2805 }
2806
2807 CTL3_UNLOCK();
2808
2809 return (0);
2810}
2811
2812static void
2813export_objhash_ntlv_internal(struct namedobj_instance *ni,
2814 struct named_object *no, void *arg)
2815{
2816 struct sockopt_data *sd;
2817 ipfw_obj_ntlv *ntlv;
2818
2819 sd = (struct sockopt_data *)arg;
2820 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2821 if (ntlv == NULL)
2822 return;
2823 ipfw_export_obj_ntlv(no, ntlv);
2824}
2825
2826/*
2827 * Lists all service objects.
2828 * Data layout (v0)(current):
2829 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
2830 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
2831 * Returns 0 on success
2832 */
2833static int
2834dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2835 struct sockopt_data *sd)
2836{
2837 ipfw_obj_lheader *hdr;
2838 int count;
2839
2840 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2841 if (hdr == NULL)
2842 return (EINVAL);
2843
2844 IPFW_UH_RLOCK(chain);
2845 count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
2846 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
2847 if (sd->valsize < hdr->size) {
2848 IPFW_UH_RUNLOCK(chain);
2849 return (ENOMEM);
2850 }
2851 hdr->count = count;
2852 hdr->objsize = sizeof(ipfw_obj_ntlv);
2853 if (count > 0)
2854 ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
2855 export_objhash_ntlv_internal, sd);
2856 IPFW_UH_RUNLOCK(chain);
2857 return (0);
2858}
2859
2860/*
2861 * Compares two sopt handlers (code, version and handler ptr).
2862 * Used both in qsort() and bsearch().
2863 * Does not compare the handler pointer in the latter case.
2864 *
2865 * Returns 0 if match is found.
2866 */
2867static int
2868compare_sh(const void *_a, const void *_b)
2869{
2870 const struct ipfw_sopt_handler *a, *b;
2871
2872 a = (const struct ipfw_sopt_handler *)_a;
2873 b = (const struct ipfw_sopt_handler *)_b;
2874
2875 if (a->opcode < b->opcode)
2876 return (-1);
2877 else if (a->opcode > b->opcode)
2878 return (1);
2879
2880 if (a->version < b->version)
2881 return (-1);
2882 else if (a->version > b->version)
2883 return (1);
2884
2885 /* bsearch helper */
2886 if (a->handler == NULL)
2887 return (0);
2888
2889 if ((uintptr_t)a->handler < (uintptr_t)b->handler)
2890 return (-1);
2891 else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
2892 return (1);
2893
2894 return (0);
2895}
2896
2897/*
2898 * Finds sopt handler based on @code and @version.
2899 *
2900 * Returns pointer to handler or NULL.
2901 */
2902static struct ipfw_sopt_handler *
2903find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
2904{
2905 struct ipfw_sopt_handler *sh, h;
2906
2907 memset(&h, 0, sizeof(h));
2908 h.opcode = code;
2909 h.version = version;
2910 h.handler = handler;
2911
2912 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
2913 ctl3_hsize, sizeof(h), compare_sh);
2914
2915 return (sh);
2916}
2917
2918static int
2919find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
2920{
2921 struct ipfw_sopt_handler *sh;
2922
2923 CTL3_LOCK();
2924 if ((sh = find_sh(opcode, version, NULL)) == NULL) {
2925 CTL3_UNLOCK();
2926 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
2927 opcode, version);
2928 return (EINVAL);
2929 }
2930 sh->refcnt++;
2931 ctl3_refct++;
2932 /* Copy handler data to requested buffer */
2933 *psh = *sh;
2934 CTL3_UNLOCK();
2935
2936 return (0);
2937}
2938
2939static void
2940find_unref_sh(struct ipfw_sopt_handler *psh)
2941{
2942 struct ipfw_sopt_handler *sh;
2943
2944 CTL3_LOCK();
2945 sh = find_sh(psh->opcode, psh->version, NULL);
2946 KASSERT(sh != NULL, ("ctl3 handler disappeared"));
2947 sh->refcnt--;
2948 ctl3_refct--;
2949 CTL3_UNLOCK();
2950}
2951
2952void
2953ipfw_init_sopt_handler()
2954{
2955
2956 CTL3_LOCK_INIT();
2957 IPFW_ADD_SOPT_HANDLER(1, scodes);
2958}
2959
2960void
2961ipfw_destroy_sopt_handler()
2962{
2963
2964 IPFW_DEL_SOPT_HANDLER(1, scodes);
2965 CTL3_LOCK_DESTROY();
2966}
2967
2968/*
2969 * Adds one or more sockopt handlers to the global array.
2970 * Function may sleep.
2971 */
2972void
2973ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
2974{
2975 size_t sz;
2976 struct ipfw_sopt_handler *tmp;
2977
2978 CTL3_LOCK();
2979
2980 for (;;) {
2981 sz = ctl3_hsize + count;
2982 CTL3_UNLOCK();
2983 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
2984 CTL3_LOCK();
2985 if (ctl3_hsize + count <= sz)
2986 break;
2987
2988 /* Retry */
2989 free(tmp, M_IPFW);
2990 }
2991
2992 /* Merge old & new arrays */
2993 sz = ctl3_hsize + count;
2994 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
2995 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
2996 qsort(tmp, sz, sizeof(*sh), compare_sh);
2997 /* Switch new and free old */
2998 if (ctl3_handlers != NULL)
2999 free(ctl3_handlers, M_IPFW);
3000 ctl3_handlers = tmp;
3001 ctl3_hsize = sz;
3002 ctl3_gencnt++;
3003
3004 CTL3_UNLOCK();
3005}
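
/*
 * Registration sketch for a module-private sockopt handler, mirroring
 * the scodes[] pattern used by this file.  IP_FW_EXAMPLE_GET and
 * dump_example() are hypothetical; only fields referenced in this file
 * (opcode, version, dir, handler) are initialized.
 */
static int dump_example(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd);

static struct ipfw_sopt_handler example_scodes[] = {
	{
		.opcode = IP_FW_EXAMPLE_GET,	/* hypothetical opcode */
		.version = 0,
		.dir = HDIR_GET,
		.handler = dump_example,
	},
};

/* Module load path:   ipfw_add_sopt_handler(example_scodes, nitems(example_scodes)); */
/* Module unload path: ipfw_del_sopt_handler(example_scodes, nitems(example_scodes)); */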
3006
3007/*
3008 * Removes one or more sockopt handlers from the global array.
3009 */
3010int
3011ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3012{
3013 size_t sz;
3014 struct ipfw_sopt_handler *tmp, *h;
3015 int i;
3016
3017 CTL3_LOCK();
3018
3019 for (i = 0; i < count; i++) {
3020 tmp = &sh[i];
3021 h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3022 if (h == NULL)
3023 continue;
3024
3025 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3026 memmove(h, h + 1, sz);
3027 ctl3_hsize--;
3028 }
3029
3030 if (ctl3_hsize == 0) {
3031 if (ctl3_handlers != NULL)
3032 free(ctl3_handlers, M_IPFW);
3033 ctl3_handlers = NULL;
3034 }
3035
3036 ctl3_gencnt++;
3037
3038 CTL3_UNLOCK();
3039
3040 return (0);
3041}
3042
3043/*
3044 * Writes data accumulated in @sd to sockopt buffer.
3045 * Zeroes internal @sd buffer.
3046 */
3047static int
3048ipfw_flush_sopt_data(struct sockopt_data *sd)
3049{
3050 struct sockopt *sopt;
3051 int error;
3052 size_t sz;
3053
3054 sz = sd->koff;
3055 if (sz == 0)
3056 return (0);
3057
3058 sopt = sd->sopt;
3059
3060 if (sopt->sopt_dir == SOPT_GET) {
3061 error = copyout(sd->kbuf, sopt->sopt_val, sz);
3062 if (error != 0)
3063 return (error);
3064 }
3065
3066 memset(sd->kbuf, 0, sd->ksize);
3067 sd->ktotal += sz;
3068 sd->koff = 0;
3069 if (sd->ktotal + sd->ksize < sd->valsize)
3070 sd->kavail = sd->ksize;
3071 else
3072 sd->kavail = sd->valsize - sd->ktotal;
3073
3074 /* Update sopt buffer data */
3075 sopt->sopt_valsize = sd->ktotal;
3076 sopt->sopt_val = sd->sopt_val + sd->ktotal;
3077
3078 return (0);
3079}
3080
3081/*
3082 * Ensures that @sd buffer has a contiguous @needed number of
3083 * bytes.
3084 *
3085 * Returns pointer to requested space or NULL.
3086 */
3087caddr_t
3088ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3089{
3090 int error;
3091 caddr_t addr;
3092
3093 if (sd->kavail < needed) {
3094 /*
3095 * Flush data and try again.
3096 */
3097 error = ipfw_flush_sopt_data(sd);
3098
3099 if (sd->kavail < needed || error != 0)
3100 return (NULL);
3101 }
3102
3103 addr = sd->kbuf + sd->koff;
3104 sd->koff += needed;
3105 sd->kavail -= needed;
3106 return (addr);
3107}
3108
3109/*
3110 * Requests @needed contiguous bytes from @sd buffer.
3111 * Function is used to notify the subsystem that we are
3112 * interested in the first @needed bytes (request header)
3113 * and the rest of the buffer can be safely zeroed.
3114 *
3115 * Returns pointer to requested space or NULL.
3116 */
3117caddr_t
3118ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3119{
3120 caddr_t addr;
3121
3122 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3123 return (NULL);
3124
3125 if (sd->kavail > 0)
3126 memset(sd->kbuf + sd->koff, 0, sd->kavail);
3127
3128 return (addr);
3129}
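
/*
 * Sketch of the export pattern the two helpers above support, as used
 * by the dump_* handlers in this file: reserve the reply header first,
 * then request space per item.  The sliding window may flush between
 * requests, so earlier pointers must not be touched again.
 * struct example_item and the object count are placeholders.
 */
struct example_item {
	uint32_t	value;		/* hypothetical export record */
};

static int
example_dump(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *olh;
	struct example_item *ei;
	uint32_t n, count;

	olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);

	count = 16;	/* placeholder: number of objects to export */
	olh->count = count;
	olh->objsize = sizeof(*ei);
	olh->size = sizeof(*olh) + count * sizeof(*ei);
	if (sd->valsize < olh->size)
		return (ENOMEM);	/* caller retries with olh->size */

	for (n = 0; n < count; n++) {
		ei = (struct example_item *)ipfw_get_sopt_space(sd,
		    sizeof(*ei));
		if (ei == NULL)
			return (ENOMEM);
		ei->value = n;		/* fill the record for object n */
	}
	return (0);
}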
3130
3131/*
3132 * New sockopt handler.
3133 */
3134int
3135ipfw_ctl3(struct sockopt *sopt)
3136{
3137 int error, locked;
3138 size_t size, valsize;
3139 struct ip_fw_chain *chain;
3140 char xbuf[256];
3141 struct sockopt_data sdata;
3142 struct ipfw_sopt_handler h;
3143 ip_fw3_opheader *op3 = NULL;
3144
3145 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3146 if (error != 0)
3147 return (error);
3148
3149 if (sopt->sopt_name != IP_FW3)
3150 return (ipfw_ctl(sopt));
3151
3152 chain = &V_layer3_chain;
3153 error = 0;
3154
3155 /* Save original valsize before it is altered via sooptcopyin() */
3156 valsize = sopt->sopt_valsize;
3157 memset(&sdata, 0, sizeof(sdata));
3158 /* Read op3 header first to determine actual operation */
3159 op3 = (ip_fw3_opheader *)xbuf;
3160 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3161 if (error != 0)
3162 return (error);
3163 sopt->sopt_valsize = valsize;
3164
3165 /*
3166 * Find and reference command.
3167 */
3168 error = find_ref_sh(op3->opcode, op3->version, &h);
3169 if (error != 0)
3170 return (error);
3171
3172 /*
3173 * Disallow modifications in really-really secure mode, but still allow
3174 * the logging counters to be reset.
3175 */
3176 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3177 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3178 if (error != 0) {
3179 find_unref_sh(&h);
3180 return (error);
3181 }
3182 }
3183
3184 /*
3185 * Fill in sockopt_data structure that may be useful for
3186 * IP_FW3 get requests.
3187 */
3188 locked = 0;
3189 if (valsize <= sizeof(xbuf)) {
3190 /* use on-stack buffer */
3191 sdata.kbuf = xbuf;
3192 sdata.ksize = sizeof(xbuf);
3193 sdata.kavail = valsize;
3194 } else {
3195
3196 /*
3197 * Determine opcode type/buffer size:
3198 * allocate sliding-window buf for data export or
3199 * contiguous buffer for special ops.
3200 */
3201 if ((h.dir & HDIR_SET) != 0) {
3202 /* Set request. Allocate contiguous buffer. */
3203 if (valsize > CTL3_LARGEBUF) {
3204 find_unref_sh(&h);
3205 return (EFBIG);
3206 }
3207
3208 size = valsize;
3209 } else {
3210 /* Get request. Allocate sliding window buffer */
3211 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3212
3213 if (size < valsize) {
3214 /* We have to wire user buffer */
3215 error = vslock(sopt->sopt_val, valsize);
3216 if (error != 0)
3217 return (error);
3218 locked = 1;
3219 }
3220 }
3221
3222 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3223 sdata.ksize = size;
3224 sdata.kavail = size;
3225 }
3226
3227 sdata.sopt = sopt;
3228 sdata.sopt_val = sopt->sopt_val;
3229 sdata.valsize = valsize;
3230
3231 /*
3232 * Copy either the whole request (if valsize < bsize_max)
3233 * or the first bsize_max bytes, to guarantee to most consumers
3234 * that all necessary data has been copied.
3235 * In any case, copy not less than sizeof(ip_fw3_opheader).
3236 */
3237 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3238 sizeof(ip_fw3_opheader))) != 0)
3239 return (error);
3240 op3 = (ip_fw3_opheader *)sdata.kbuf;
3241
3242 /* Finally, run handler */
3243 error = h.handler(chain, op3, &sdata);
3244 find_unref_sh(&h);
3245
3246 /* Flush state and free buffers */
3247 if (error == 0)
3248 error = ipfw_flush_sopt_data(&sdata);
3249 else
3250 ipfw_flush_sopt_data(&sdata);
3251
3252 if (locked != 0)
3253 vsunlock(sdata.sopt_val, valsize);
3254
3255 /* Restore original pointer and set number of bytes written */
3256 sopt->sopt_val = sdata.sopt_val;
3257 sopt->sopt_valsize = sdata.ktotal;
3258 if (sdata.kbuf != xbuf)
3259 free(sdata.kbuf, M_TEMP);
3260
3261 return (error);
3262}
3263
3264/**
3265 * {set|get}sockopt parser.
3266 */
3267int
3268ipfw_ctl(struct sockopt *sopt)
3269{
3270#define RULE_MAXSIZE (512*sizeof(u_int32_t))
3271 int error;
3272 size_t size, valsize;
3273 struct ip_fw *buf;
3274 struct ip_fw_rule0 *rule;
3275 struct ip_fw_chain *chain;
3276 u_int32_t rulenum[2];
3277 uint32_t opt;
3278 struct rule_check_info ci;
3279 IPFW_RLOCK_TRACKER;
3280
3281 chain = &V_layer3_chain;
3282 error = 0;
3283
3284 /* Save original valsize before it is altered via sooptcopyin() */
3285 valsize = sopt->sopt_valsize;
3286 opt = sopt->sopt_name;
3287
3288 /*
3289 * Disallow modifications in really-really secure mode, but still allow
3290 * the logging counters to be reset.
3291 */
3292 if (opt == IP_FW_ADD ||
3293 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3294 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3295 if (error != 0)
3296 return (error);
3297 }
3298
3299 switch (opt) {
3300 case IP_FW_GET:
3301 /*
3302 * pass up a copy of the current rules. Static rules
3303 * come first (the last of which has number IPFW_DEFAULT_RULE),
3304 * followed by a possibly empty list of dynamic rules.
3305 * The last dynamic rule has NULL in the "next" field.
3306 *
3307 * Note that the calculated size is used to bound the
3308 * amount of data returned to the user. The rule set may
3309 * change between calculating the size and returning the
3310 * data in which case we'll just return what fits.
3311 */
3312 for (;;) {
3313 int len = 0, want;
3314
3315 size = chain->static_len;
3316 size += ipfw_dyn_len();
3317 if (size >= sopt->sopt_valsize)
3318 break;
3319 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3320 IPFW_UH_RLOCK(chain);
3321 /* check again how much space we need */
3322 want = chain->static_len + ipfw_dyn_len();
3323 if (size >= want)
3324 len = ipfw_getrules(chain, buf, size);
3325 IPFW_UH_RUNLOCK(chain);
3326 if (size >= want)
3327 error = sooptcopyout(sopt, buf, len);
3328 free(buf, M_TEMP);
3329 if (size >= want)
3330 break;
3331 }
3332 break;
3333
3334 case IP_FW_FLUSH:
3335 /* locking is done within del_entry() */
3336 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3337 break;
3338
3339 case IP_FW_ADD:
3340 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3341 error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3342 sizeof(struct ip_fw7) );
3343
3344 memset(&ci, 0, sizeof(struct rule_check_info));
3345
3346 /*
3347 * If the size of commands equals RULESIZE7 then we assume
3348 * a FreeBSD 7.2 binary is talking to us (set is7=1).
3349 * is7 is persistent so the next 'ipfw list' command
3350 * will use this format.
3351 * NOTE: If the wrong version is guessed (this can happen if
3352 * the first ipfw command is 'ipfw [pipe] list')
3353 * the ipfw binary may crash or loop infinitely...
3354 */
3355 size = sopt->sopt_valsize;
3356 if (size == RULESIZE7(rule)) {
3357 is7 = 1;
3358 error = convert_rule_to_8(rule);
3359 if (error) {
3360 free(rule, M_TEMP);
3361 return error;
3362 }
3363 size = RULESIZE(rule);
3364 } else
3365 is7 = 0;
3366 if (error == 0)
3367 error = check_ipfw_rule0(rule, size, &ci);
3368 if (error == 0) {
3369 /* locking is done within commit_rules() */
3370 struct ip_fw *krule;
3371 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3372 ci.urule = (caddr_t)rule;
3373 ci.krule = krule;
3374 import_rule0(&ci);
3375 error = commit_rules(chain, &ci, 1);
3376 if (!error && sopt->sopt_dir == SOPT_GET) {
3377 if (is7) {
3378 error = convert_rule_to_7(rule);
3379 size = RULESIZE7(rule);
3380 if (error) {
3381 free(rule, M_TEMP);
3382 return error;
3383 }
3384 }
3385 error = sooptcopyout(sopt, rule, size);
3386 }
3387 }
3388 free(rule, M_TEMP);
3389 break;
3390
3391 case IP_FW_DEL:
3392 /*
3393 * IP_FW_DEL is used for deleting single rules or sets,
3394 * and (ab)used to atomically manipulate sets. Argument size
3395 * is used to distinguish between the two:
3396 * sizeof(u_int32_t)
3397 * delete single rule or set of rules,
3398 * or reassign rules (or sets) to a different set.
3399 * 2*sizeof(u_int32_t)
3400 * atomic disable/enable sets.
3401 * first u_int32_t contains sets to be disabled,
3402 * second u_int32_t contains sets to be enabled.
3403 */
3404 error = sooptcopyin(sopt, rulenum,
3405 2*sizeof(u_int32_t), sizeof(u_int32_t));
3406 if (error)
3407 break;
3408 size = sopt->sopt_valsize;
3409 if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3410 /* delete or reassign, locking done in del_entry() */
3411 error = del_entry(chain, rulenum[0]);
3412 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3413 IPFW_UH_WLOCK(chain);
3414 V_set_disable =
3415 (V_set_disable | rulenum[0]) & ~rulenum[1] &
3416 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3417 IPFW_UH_WUNLOCK(chain);
3418 } else
3419 error = EINVAL;
3420 break;
3421
3422 case IP_FW_ZERO:
3423 case IP_FW_RESETLOG: /* argument is a u_int32_t, the rule number */
3424 rulenum[0] = 0;
3425 if (sopt->sopt_val != 0) {
3426 error = sooptcopyin(sopt, rulenum,
3427 sizeof(u_int32_t), sizeof(u_int32_t));
3428 if (error)
3429 break;
3430 }
3431 error = zero_entry(chain, rulenum[0],
3432 sopt->sopt_name == IP_FW_RESETLOG);
3433 break;
3434
3435 /*--- TABLE opcodes ---*/
3436 case IP_FW_TABLE_ADD:
3437 case IP_FW_TABLE_DEL:
3438 {
3439 ipfw_table_entry ent;
3440 struct tentry_info tei;
3441 struct tid_info ti;
3442 struct table_value v;
3443
3444 error = sooptcopyin(sopt, &ent,
3445 sizeof(ent), sizeof(ent));
3446 if (error)
3447 break;
3448
3449 memset(&tei, 0, sizeof(tei));
3450 tei.paddr = &ent.addr;
3451 tei.subtype = AF_INET;
3452 tei.masklen = ent.masklen;
3453 ipfw_import_table_value_legacy(ent.value, &v);
3454 tei.pvalue = &v;
3455 memset(&ti, 0, sizeof(ti));
3456 ti.uidx = ent.tbl;
3457 ti.type = IPFW_TABLE_CIDR;
3458
3459 error = (opt == IP_FW_TABLE_ADD) ?
3460 add_table_entry(chain, &ti, &tei, 0, 1) :
3461 del_table_entry(chain, &ti, &tei, 0, 1);
3462 }
3463 break;
3464
3465
3466 case IP_FW_TABLE_FLUSH:
3467 {
3468 u_int16_t tbl;
3469 struct tid_info ti;
3470
3471 error = sooptcopyin(sopt, &tbl,
3472 sizeof(tbl), sizeof(tbl));
3473 if (error)
3474 break;
3475 memset(&ti, 0, sizeof(ti));
3476 ti.uidx = tbl;
3477 error = flush_table(chain, &ti);
3478 }
3479 break;
3480
3481 case IP_FW_TABLE_GETSIZE:
3482 {
3483 u_int32_t tbl, cnt;
3484 struct tid_info ti;
3485
3486 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
3487 sizeof(tbl))))
3488 break;
3489 memset(&ti, 0, sizeof(ti));
3490 ti.uidx = tbl;
3491 IPFW_RLOCK(chain);
3492 error = ipfw_count_table(chain, &ti, &cnt);
3493 IPFW_RUNLOCK(chain);
3494 if (error)
3495 break;
3496 error = sooptcopyout(sopt, &cnt, sizeof(cnt));
3497 }
3498 break;
3499
3500 case IP_FW_TABLE_LIST:
3501 {
3502 ipfw_table *tbl;
3503 struct tid_info ti;
3504
3505 if (sopt->sopt_valsize < sizeof(*tbl)) {
3506 error = EINVAL;
3507 break;
3508 }
3509 size = sopt->sopt_valsize;
3510 tbl = malloc(size, M_TEMP, M_WAITOK);
3511 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
3512 if (error) {
3513 free(tbl, M_TEMP);
3514 break;
3515 }
3516 tbl->size = (size - sizeof(*tbl)) /
3517 sizeof(ipfw_table_entry);
3518 memset(&ti, 0, sizeof(ti));
3519 ti.uidx = tbl->tbl;
3520 IPFW_RLOCK(chain);
3521 error = ipfw_dump_table_legacy(chain, &ti, tbl);
3522 IPFW_RUNLOCK(chain);
3523 if (error) {
3524 free(tbl, M_TEMP);
3525 break;
3526 }
3527 error = sooptcopyout(sopt, tbl, size);
3528 free(tbl, M_TEMP);
3529 }
3530 break;
3531
3532 /*--- NAT operations are protected by the IPFW_LOCK ---*/
3533 case IP_FW_NAT_CFG:
3534 if (IPFW_NAT_LOADED)
3535 error = ipfw_nat_cfg_ptr(sopt);
3536 else {
3537 printf("IP_FW_NAT_CFG: %s\n",
3538 "ipfw_nat not present, please load it");
3539 error = EINVAL;
3540 }
3541 break;
3542
3543 case IP_FW_NAT_DEL:
3544 if (IPFW_NAT_LOADED)
3545 error = ipfw_nat_del_ptr(sopt);
3546 else {
3547 printf("IP_FW_NAT_DEL: %s\n",
3548 "ipfw_nat not present, please load it");
3549 error = EINVAL;
3550 }
3551 break;
3552
3553 case IP_FW_NAT_GET_CONFIG:
3554 if (IPFW_NAT_LOADED)
3555 error = ipfw_nat_get_cfg_ptr(sopt);
3556 else {
3557 printf("IP_FW_NAT_GET_CFG: %s\n",
3558 "ipfw_nat not present, please load it");
3559 error = EINVAL;
3560 }
3561 break;
3562
3563 case IP_FW_NAT_GET_LOG:
3564 if (IPFW_NAT_LOADED)
3565 error = ipfw_nat_get_log_ptr(sopt);
3566 else {
3567 printf("IP_FW_NAT_GET_LOG: %s\n",
3568 "ipfw_nat not present, please load it");
3569 error = EINVAL;
3570 }
3571 break;
3572
3573 default:
3574 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
3575 error = EINVAL;
3576 }
3577
3578 return (error);
3579#undef RULE_MAXSIZE
3580}
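
/*
 * Userland sketch of the two IP_FW_DEL argument forms handled above,
 * issued over the raw socket traditionally used by ipfw(8); headers
 * and error handling are omitted, the example_* helper is illustrative.
 */
static void
example_ipfw_del(void)
{
	u_int32_t arg[2];
	int s;

	s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	/* One u_int32_t: delete rule (or set of rules) number 1000. */
	arg[0] = 1000;
	setsockopt(s, IPPROTO_IP, IP_FW_DEL, arg, sizeof(u_int32_t));

	/* Two u_int32_t's: atomically disable set 1 and enable set 2. */
	arg[0] = 1 << 1;	/* sets to disable */
	arg[1] = 1 << 2;	/* sets to enable */
	setsockopt(s, IPPROTO_IP, IP_FW_DEL, arg, 2 * sizeof(u_int32_t));

	close(s);
}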
3581#define RULE_MAXSIZE (256*sizeof(u_int32_t))
3582
3583/* Functions to convert rules 7.2 <==> 8.0 */
3584static int
3585convert_rule_to_7(struct ip_fw_rule0 *rule)
3586{
3587 /* Used to modify original rule */
3588 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
3589 /* copy of original rule, version 8 */
3590 struct ip_fw_rule0 *tmp;
3591
3592 /* Used to copy commands */
3593 ipfw_insn *ccmd, *dst;
3594 int ll = 0, ccmdlen = 0;
3595
3596 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
3597 if (tmp == NULL) {
3598 return (1); /* XXX error */
3599 }
3600 bcopy(rule, tmp, RULE_MAXSIZE);
3601
3602 /* Copy fields */
3603 //rule7->_pad = tmp->_pad;
3604 rule7->set = tmp->set;
3605 rule7->rulenum = tmp->rulenum;
3606 rule7->cmd_len = tmp->cmd_len;
3607 rule7->act_ofs = tmp->act_ofs;
3608 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
3609 rule7->cmd_len = tmp->cmd_len;
3610 rule7->pcnt = tmp->pcnt;
3611 rule7->bcnt = tmp->bcnt;
3612 rule7->timestamp = tmp->timestamp;
3613
3614 /* Copy commands */
3615 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
3616 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3617 ccmdlen = F_LEN(ccmd);
3618
3619 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3620
3621 if (dst->opcode > O_NAT)
3622 /* O_REASS doesn't exist in the 7.2 version, so
3623 * decrement the opcode if it is after O_REASS
3624 */
3625 dst->opcode--;
3626
3627 if (ccmdlen > ll) {
3628 printf("ipfw: opcode %d size truncated\n",
3629 ccmd->opcode);
free(tmp, M_TEMP);
3630 return (EINVAL);
3631 }
3632 }
3633 free(tmp, M_TEMP);
3634
3635 return 0;
3636}
3637
3638static int
3639convert_rule_to_8(struct ip_fw_rule0 *rule)
3640{
3641 /* Used to modify original rule */
3642 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
3643
3644 /* Used to copy commands */
3645 ipfw_insn *ccmd, *dst;
3646 int ll = 0, ccmdlen = 0;
3647
3648 /* Copy of original rule */
3649 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
3650 if (tmp == NULL) {
3651 return (1); /* XXX error */
3652 }
3653
3654 bcopy(rule7, tmp, RULE_MAXSIZE);
3655
3656 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
3657 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3658 ccmdlen = F_LEN(ccmd);
3659
3660 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3661
3662 if (dst->opcode > O_NAT)
3663 /* O_REASS doesn't exist in the 7.2 version, so
3664 * increment the opcode if it is after O_REASS
3665 */
3666 dst->opcode++;
3667
3668 if (ccmdlen > ll) {
3669 printf("ipfw: opcode %d size truncated\n",
3670 ccmd->opcode);
free(tmp, M_TEMP);
3671 return (EINVAL);
3672 }
3673 }
3674
3675 rule->_pad = tmp->_pad;
3676 rule->set = tmp->set;
3677 rule->rulenum = tmp->rulenum;
3678 rule->cmd_len = tmp->cmd_len;
3679 rule->act_ofs = tmp->act_ofs;
3680 rule->next_rule = (struct ip_fw *)tmp->next_rule;
3681 rule->cmd_len = tmp->cmd_len;
3682 rule->id = 0; /* XXX check whether 0 is ok here */
3683 rule->pcnt = tmp->pcnt;
3684 rule->bcnt = tmp->bcnt;
3685 rule->timestamp = tmp->timestamp;
3686
3687 free(tmp, M_TEMP);
3688 return 0;
3689}
3690
3691/*
3692 * Named object api
3693 *
3694 */
3695
3696void
3697ipfw_init_srv(struct ip_fw_chain *ch)
3698{
3699
3700 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
3701 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
3702 M_IPFW, M_WAITOK | M_ZERO);
3703}
3704
3705void
3706ipfw_destroy_srv(struct ip_fw_chain *ch)
3707{
3708
3709 free(ch->srvstate, M_IPFW);
3710 ipfw_objhash_destroy(ch->srvmap);
3711}
3712
3713/*
3714 * Allocates a new bitmask which can be used to enlarge/shrink
3715 * the named instance index.
3716 */
3717void
3718ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
3719{
3720 size_t size;
3721 int max_blocks;
3722 u_long *idx_mask;
3723
3724 KASSERT((items % BLOCK_ITEMS) == 0,
3725 ("bitmask size needs to power of 2 and greater or equal to %zu",
3726 BLOCK_ITEMS));
3727
3728 max_blocks = items / BLOCK_ITEMS;
3729 size = items / 8;
3730 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
3731 /* Mark all as free */
3732 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
3733 *idx_mask &= ~(u_long)1; /* Skip index 0 */
3734
3735 *idx = idx_mask;
3736 *pblocks = max_blocks;
3737}
3738
3739/*
3740 * Copy current bitmask index to new one.
3741 */
3742void
3743ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
3744{
3745 int old_blocks, new_blocks;
3746 u_long *old_idx, *new_idx;
3747 int i;
3748
3749 old_idx = ni->idx_mask;
3750 old_blocks = ni->max_blocks;
3751 new_idx = *idx;
3752 new_blocks = *blocks;
3753
3754 for (i = 0; i < IPFW_MAX_SETS; i++) {
3755 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
3756 old_blocks * sizeof(u_long));
3757 }
3758}
3759
3760/*
3761 * Swaps current @ni index with new one.
3762 */
3763void
3764ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
3765{
3766 int old_blocks;
3767 u_long *old_idx;
3768
3769 old_idx = ni->idx_mask;
3770 old_blocks = ni->max_blocks;
3771
3772 ni->idx_mask = *idx;
3773 ni->max_blocks = *blocks;
3774
3775 /* Save old values */
3776 *idx = old_idx;
3777 *blocks = old_blocks;
3778}
3779
3780void
3781ipfw_objhash_bitmap_free(void *idx, int blocks)
3782{
3783
3784 free(idx, M_IPFW);
3785}
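
/*
 * Sketch of how the bitmap helpers above combine when an instance index
 * has to grow; new_items and the locking notes are illustrative, the
 * calls are exactly the ones defined above.
 */
static void
example_grow_index(struct namedobj_instance *ni, uint32_t new_items)
{
	void *new_idx;
	int new_blocks;

	/* May sleep: allocate the larger bitmap outside any lock. */
	ipfw_objhash_bitmap_alloc(new_items, &new_idx, &new_blocks);

	/* Under the lock protecting @ni: copy state, then switch maps. */
	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);

	/* After the swap the local pointers refer to the old map. */
	ipfw_objhash_bitmap_free(new_idx, new_blocks);
}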
3786
3787/*
3788 * Creates named hash instance.
3789 * Must be called without holding any locks.
3790 * Returns pointer to the new instance.
3791 */
3792struct namedobj_instance *
3793ipfw_objhash_create(uint32_t items)
3794{
3795 struct namedobj_instance *ni;
3796 int i;
3797 size_t size;
3798
3799 size = sizeof(struct namedobj_instance) +
3800 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
3801 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
3802
3803 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
3804 ni->nn_size = NAMEDOBJ_HASH_SIZE;
3805 ni->nv_size = NAMEDOBJ_HASH_SIZE;
3806
3807 ni->names = (struct namedobjects_head *)(ni +1);
3808 ni->values = &ni->names[ni->nn_size];
3809
3810 for (i = 0; i < ni->nn_size; i++)
3811 TAILQ_INIT(&ni->names[i]);
3812
3813 for (i = 0; i < ni->nv_size; i++)
3814 TAILQ_INIT(&ni->values[i]);
3815
3816 /* Set default hashing/comparison functions */
3817 ni->hash_f = objhash_hash_name;
3818 ni->cmp_f = objhash_cmp_name;
3819
3820 /* Allocate bitmask separately due to possible resize */
3821 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
3822
3823 return (ni);
3824}
3825
3826void
3827ipfw_objhash_destroy(struct namedobj_instance *ni)
3828{
3829
3830 free(ni->idx_mask, M_IPFW);
3831 free(ni, M_IPFW);
3832}
3833
3834void
3835ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
3836 objhash_cmp_f *cmp_f)
3837{
3838
3839 ni->hash_f = hash_f;
3840 ni->cmp_f = cmp_f;
3841}
3842
3843static uint32_t
3844objhash_hash_name(struct namedobj_instance *ni, void *name, uint32_t set)
3845{
3846
3847 return (fnv_32_str((char *)name, FNV1_32_INIT));
3848}
3849
3850static int
3851objhash_cmp_name(struct named_object *no, void *name, uint32_t set)
3852{
3853
3854 if ((strcmp(no->name, (char *)name) == 0) && (no->set == set))
3855 return (0);
3856
3857 return (1);
3858}
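
/*
 * Example of an alternate hash/cmp pair that also folds the set number
 * into the bucket choice (the defaults above hash on the name only and
 * check the set during comparison); purely illustrative.
 */
static uint32_t
example_hash_name_set(struct namedobj_instance *ni, void *name, uint32_t set)
{

	return (fnv_32_str((char *)name, FNV1_32_INIT) ^ set);
}

static int
example_cmp_name_set(struct named_object *no, void *name, uint32_t set)
{

	if (strcmp(no->name, (char *)name) == 0 && no->set == set)
		return (0);
	return (1);
}

/* Must be installed before any object is added to the instance: */
/* ipfw_objhash_set_funcs(ni, example_hash_name_set, example_cmp_name_set); */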
3859
3860static uint32_t
3861objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
3862{
3863 uint32_t v;
3864
3865 v = val % (ni->nv_size - 1);
3866
3867 return (v);
3868}
3869
3870struct named_object *
3871ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
3872{
3873 struct named_object *no;
3874 uint32_t hash;
3875
3876 hash = ni->hash_f(ni, name, set) % ni->nn_size;
3877
3878 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3879 if (ni->cmp_f(no, name, set) == 0)
3880 return (no);
3881 }
3882
3883 return (NULL);
3884}
3885
3886/*
3887 * Find named object by name, considering also its TLV type.
3888 */
3889struct named_object *
3890ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
3891 uint32_t type, char *name)
3892{
3893 struct named_object *no;
3894 uint32_t hash;
3895
3896 hash = ni->hash_f(ni, name, set) % ni->nn_size;
3897
3898 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3899 if (ni->cmp_f(no, name, set) == 0 && no->etlv == type)
3900 return (no);
3901 }
3902
3903 return (NULL);
3904}
3905
3906struct named_object *
3907ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
3908{
3909 struct named_object *no;
3910 uint32_t hash;
3911
3912 hash = objhash_hash_idx(ni, kidx);
3913
3914 TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
3915 if (no->kidx == kidx)
3916 return (no);
3917 }
3918
3919 return (NULL);
3920}
3921
3922int
3923ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
3924 struct named_object *b)
3925{
3926
3927 if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
3928 return (1);
3929
3930 return (0);
3931}
3932
3933void
3934ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
3935{
3936 uint32_t hash;
3937
3938 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3939 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
3940
3941 hash = objhash_hash_idx(ni, no->kidx);
3942 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
3943
3944 ni->count++;
3945}
3946
3947void
3948ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
3949{
3950 uint32_t hash;
3951
3952 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3953 TAILQ_REMOVE(&ni->names[hash], no, nn_next);
3954
3955 hash = objhash_hash_idx(ni, no->kidx);
3956 TAILQ_REMOVE(&ni->values[hash], no, nv_next);
3957
3958 ni->count--;
3959}
3960
3961uint32_t
3962ipfw_objhash_count(struct namedobj_instance *ni)
3963{
3964
3965 return (ni->count);
3966}
3967
3968/*
3969 * Runs @f for each found named object.
3970 * It is safe to delete objects from the callback.
3971 */
3972void
3973ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
3974{
3975 struct named_object *no, *no_tmp;
3976 int i;
3977
3978 for (i = 0; i < ni->nn_size; i++) {
3979 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp)
3980 f(ni, no, arg);
3981 }
3982}
3983
3984/*
3985 * Marks the given index as free in the instance bitmask.
3986 * Returns 0 on success.
3987 */
3988int
3989ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
3990{
3991 u_long *mask;
3992 int i, v;
3993
3994 i = idx / BLOCK_ITEMS;
3995 v = idx % BLOCK_ITEMS;
3996
3997 if (i >= ni->max_blocks)
3998 return (1);
3999
4000 mask = &ni->idx_mask[i];
4001
4002 if ((*mask & ((u_long)1 << v)) != 0)
4003 return (1);
4004
4005 /* Mark as free */
4006 *mask |= (u_long)1 << v;
4007
4008 /* Update free offset */
4009 if (ni->free_off[0] > i)
4010 ni->free_off[0] = i;
4011
4012 return (0);
4013}
4014
4015/*
4016 * Allocates a new index in the given instance and stores it in @pidx.
4017 * Returns 0 on success.
4018 */
4019int
4020ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4021{
4022 struct namedobj_instance *ni;
4023 u_long *mask;
4024 int i, off, v;
4025
4026 ni = (struct namedobj_instance *)n;
4027
4028 off = ni->free_off[0];
4029 mask = &ni->idx_mask[off];
4030
4031 for (i = off; i < ni->max_blocks; i++, mask++) {
4032 if ((v = ffsl(*mask)) == 0)
4033 continue;
4034
4035 /* Mark as busy */
4036 *mask &= ~ ((u_long)1 << (v - 1));
4037
4038 ni->free_off[0] = i;
4039
4040 v = BLOCK_ITEMS * i + v - 1;
4041
4042 *pidx = v;
4043 return (0);
4044 }
4045
4046 return (1);
4047}
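
/*
 * Combined sketch of the index bitmap and the name/value hashes above:
 * reserve a kernel index for a new object, link it, and undo both steps
 * on teardown.  The example_* helpers are illustrative only.
 */
static int
example_link_object(struct namedobj_instance *ni, struct named_object *no)
{
	uint16_t kidx;

	/* Index 0 is never handed out (see ipfw_objhash_bitmap_alloc). */
	if (ipfw_objhash_alloc_idx(ni, &kidx) != 0)
		return (1);	/* bitmap exhausted */
	no->kidx = kidx;
	ipfw_objhash_add(ni, no);
	return (0);
}

static void
example_unlink_object(struct namedobj_instance *ni, struct named_object *no)
{

	ipfw_objhash_del(ni, no);
	ipfw_objhash_free_idx(ni, no->kidx);
	no->kidx = 0;
}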
4048
4049/* end of file */