ip_fw_dynamic.c revision 317262
1/*-
2 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26#include <sys/cdefs.h>
27__FBSDID("$FreeBSD: stable/11/sys/netpfil/ipfw/ip_fw_dynamic.c 317262 2017-04-21 17:09:37Z ae $");
28
29#define        DEB(x)
30#define        DDB(x) x
31
32/*
33 * Dynamic rule support for ipfw
34 */
35
36#include "opt_ipfw.h"
37#include "opt_inet.h"
38#ifndef INET
39#error IPFIREWALL requires INET.
40#endif /* INET */
41#include "opt_inet6.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/kernel.h>
48#include <sys/ktr.h>
49#include <sys/lock.h>
50#include <sys/rmlock.h>
51#include <sys/socket.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54#include <net/ethernet.h> /* for ETHERTYPE_IP */
55#include <net/if.h>
56#include <net/if_var.h>
57#include <net/pfil.h>
58#include <net/vnet.h>
59
60#include <netinet/in.h>
61#include <netinet/ip.h>
62#include <netinet/ip_var.h>	/* ip_defttl */
63#include <netinet/ip_fw.h>
64#include <netinet/tcp_var.h>
65#include <netinet/udp.h>
66
67#include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
68#ifdef INET6
69#include <netinet6/in6_var.h>
70#include <netinet6/ip6_var.h>
71#endif
72
73#include <netpfil/ipfw/ip_fw_private.h>
74
75#include <machine/in_cksum.h>	/* XXX for in_cksum */
76
77#ifdef MAC
78#include <security/mac/mac_framework.h>
79#endif
80
81/*
82 * Description of dynamic rules.
83 *
84 * Dynamic rules are stored in lists accessed through a hash table
85 * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
86 * be modified through the sysctl variable dyn_buckets which is
87 * updated when the table becomes empty.
88 *
89 * XXX currently there is only one list, ipfw_dyn.
90 *
91 * When a packet is received, its address fields are first masked
92 * with the mask defined for the rule, then hashed, then matched
93 * against the entries in the corresponding list.
94 * Dynamic rules can be used for different purposes:
95 *  + stateful rules;
96 *  + enforcing limits on the number of sessions;
97 *  + in-kernel NAT (not implemented yet)
98 *
99 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
100 * measured in seconds and depending on the flags.
101 *
102 * The total number of dynamic rules is equal to the UMA zone item count.
103 * The max number of dynamic rules is dyn_max. When we reach
104 * the maximum number of rules we do not create any more. This is
105 * done to avoid consuming too much memory, but also too much
106 * time when searching on each packet (ideally, we should try instead
107 * to put a limit on the length of the list on each bucket...).
108 *
109 * Each dynamic rule holds a pointer to the parent ipfw rule so
110 * we know what action to perform. Dynamic rules are removed when
111 * the parent rule is deleted. This can be changed by dyn_keep_states
112 * sysctl.
113 *
114 * There are some limitations with dynamic rules -- we do not
115 * obey the 'randomized match', and we do not do multiple
116 * passes through the firewall. XXX check the latter!!!
117 */
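
/*
 * Editor's note (illustrative, not part of the original source): typical
 * ipfw(8) rules that exercise this code by creating and checking dynamic
 * states; the rule numbers and the limit value are arbitrary examples:
 *
 *	ipfw add 100 check-state
 *	ipfw add 200 allow tcp from any to any setup keep-state
 *	ipfw add 300 allow tcp from any to me 22 setup limit src-addr 4
 */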
118
119struct ipfw_dyn_bucket {
120	struct mtx	mtx;		/* Bucket protecting lock */
121	ipfw_dyn_rule	*head;		/* Pointer to first rule */
122};
123
124/*
125 * Static variables followed by global ones
126 */
127static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
128static VNET_DEFINE(u_int32_t, dyn_buckets_max);
129static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
130static VNET_DEFINE(struct callout, ipfw_timeout);
131#define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
132#define	V_dyn_buckets_max		VNET(dyn_buckets_max)
133#define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
134#define V_ipfw_timeout                  VNET(ipfw_timeout)
135
136static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
137#define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
138
139#define	IPFW_BUCK_LOCK_INIT(b)	\
140	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
141#define	IPFW_BUCK_LOCK_DESTROY(b)	\
142	mtx_destroy(&(b)->mtx)
143#define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
144#define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
145#define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
146
147
148static VNET_DEFINE(int, dyn_keep_states);
149#define	V_dyn_keep_states		VNET(dyn_keep_states)
150
151/*
152 * Timeouts for various events in handling dynamic rules.
153 */
154static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
155static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
156static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
157static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
158static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
159static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
160
161#define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
162#define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
163#define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
164#define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
165#define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
166#define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
167
168/*
169 * Keepalives are sent if dyn_keepalive is set. They are sent every
170 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
171 * seconds of lifetime of a rule.
172 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
173 * than dyn_keepalive_period.
174 */
175
176static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
177static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
178static VNET_DEFINE(u_int32_t, dyn_keepalive);
179static VNET_DEFINE(time_t, dyn_keepalive_last);
180
181#define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
182#define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
183#define	V_dyn_keepalive			VNET(dyn_keepalive)
184#define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
185
186static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
187
188#define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
189#define	V_dyn_max			VNET(dyn_max)
190
191/* for userspace, we emulate the uma_zone_get_cur() counter with ipfw_dyn_count */
192static int ipfw_dyn_count;	/* number of objects */
193
194#ifdef USERSPACE /* emulation of UMA object counters for userspace */
195#define uma_zone_get_cur(x)	ipfw_dyn_count
196#endif /* USERSPACE */
197
198static int last_log;	/* Log ratelimiting */
199
200static void ipfw_dyn_tick(void *vnetx);
201static void check_dyn_rules(struct ip_fw_chain *, ipfw_range_tlv *, int, int);
202#ifdef SYSCTL_NODE
203
204static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
205static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
206
207SYSBEGIN(f2)
208
209SYSCTL_DECL(_net_inet_ip_fw);
210SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
211    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
212    "Max number of dyn. buckets");
213SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
214    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
215    "Current number of dyn. buckets");
216SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
217    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
218    "Number of dyn. rules");
219SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
220    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
221    "Max number of dyn. rules");
222SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
223    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
224    "Lifetime of dyn. rules for acks");
225SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
226    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
227    "Lifetime of dyn. rules for syn");
228SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
229    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
230    "Lifetime of dyn. rules for fin");
231SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
232    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
233    "Lifetime of dyn. rules for rst");
234SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
235    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
236    "Lifetime of dyn. rules for UDP");
237SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
238    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
239    "Lifetime of dyn. rules for other situations");
240SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
241    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
242    "Enable keepalives for dyn. rules");
243SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
244    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
245    "Do not flush dynamic states on rule deletion");
246
247SYSEND
248
249#endif /* SYSCTL_NODE */
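
/*
 * Editor's note: a minimal tuning sketch (not part of the original source)
 * using the sysctl OIDs declared above; the values are arbitrary examples:
 *
 *	sysctl net.inet.ip.fw.dyn_max=65535
 *	sysctl net.inet.ip.fw.dyn_buckets=1024
 *	sysctl net.inet.ip.fw.dyn_keep_states=1
 */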
250
251
252#ifdef INET6
253static __inline int
254hash_packet6(struct ipfw_flow_id *id)
255{
256	u_int32_t i;
257	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
258	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
259	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
260	    (id->src_ip6.__u6_addr.__u6_addr32[3]);
261	return ntohl(i);
262}
263#endif
264
265/*
266 * IMPORTANT: the hash function for dynamic rules must be commutative
267 * in source and destination (ip,port), because rules are bidirectional
268 * and we want to find both in the same bucket.
269 */
270static __inline int
271hash_packet(struct ipfw_flow_id *id, int buckets)
272{
273	u_int32_t i;
274
275#ifdef INET6
276	if (IS_IP6_FLOW_ID(id))
277		i = hash_packet6(id);
278	else
279#endif /* INET6 */
280	i = (id->dst_ip) ^ (id->src_ip);
281	i ^= (id->dst_port) ^ (id->src_port);
282	return (i & (buckets - 1));
283}
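
#if 0
/*
 * Editor's sketch (disabled, not part of the original source): a self-check
 * illustrating the commutativity property described above -- the forward and
 * the reverse flow of the same IPv4 session hash to the same bucket. The
 * addresses and ports are arbitrary.
 */
static void
hash_packet_symmetry_check(void)
{
	struct ipfw_flow_id fwd = {
		.src_ip = 0x0a000001, .dst_ip = 0x0a000002,
		.src_port = 12345, .dst_port = 80,
	};
	struct ipfw_flow_id rev = {
		.src_ip = 0x0a000002, .dst_ip = 0x0a000001,
		.src_port = 80, .dst_port = 12345,
	};

	KASSERT(hash_packet(&fwd, 256) == hash_packet(&rev, 256),
	    ("hash_packet() must be commutative in src/dst"));
}
#endif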
284
285#if 0
286#define	DYN_DEBUG(fmt, ...)	do {			\
287	printf("%s: " fmt "\n", __func__, __VA_ARGS__);	\
288} while (0)
289#else
290#define	DYN_DEBUG(fmt, ...)
291#endif
292
293static char *default_state_name = "default";
294struct dyn_state_obj {
295	struct named_object	no;
296	char			name[64];
297};
298
299#define	DYN_STATE_OBJ(ch, cmd)	\
300    ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
301/*
302 * Classifier callback.
303 * Return 0 if opcode contains object that should be referenced
304 * or rewritten.
305 */
306static int
307dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
308{
309
310	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
311	/* Don't rewrite "check-state any" */
312	if (cmd->arg1 == 0 &&
313	    cmd->opcode == O_CHECK_STATE)
314		return (1);
315
316	*puidx = cmd->arg1;
317	*ptype = 0;
318	return (0);
319}
320
321static void
322dyn_update(ipfw_insn *cmd, uint16_t idx)
323{
324
325	cmd->arg1 = idx;
326	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
327}
328
329static int
330dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
331    struct named_object **pno)
332{
333	ipfw_obj_ntlv *ntlv;
334	const char *name;
335
336	DYN_DEBUG("uidx %d", ti->uidx);
337	if (ti->uidx != 0) {
338		if (ti->tlvs == NULL)
339			return (EINVAL);
340		/* Search ntlv in the buffer provided by user */
341		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
342		    IPFW_TLV_STATE_NAME);
343		if (ntlv == NULL)
344			return (EINVAL);
345		name = ntlv->name;
346	} else
347		name = default_state_name;
348	/*
349	 * Search named object with corresponding name.
350	 * Since state objects are global, ignore the set value
351	 * and use zero instead.
352	 */
353	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
354	    IPFW_TLV_STATE_NAME, name);
355	/*
356	 * We always return success here.
357	 * The caller will check *pno and mark object as unresolved,
358	 * then it will automatically create "default" object.
359	 */
360	return (0);
361}
362
363static struct named_object *
364dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
365{
366
367	DYN_DEBUG("kidx %d", idx);
368	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
369}
370
371static int
372dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
373    uint16_t *pkidx)
374{
375	struct namedobj_instance *ni;
376	struct dyn_state_obj *obj;
377	struct named_object *no;
378	ipfw_obj_ntlv *ntlv;
379	char *name;
380
381	DYN_DEBUG("uidx %d", ti->uidx);
382	if (ti->uidx != 0) {
383		if (ti->tlvs == NULL)
384			return (EINVAL);
385		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
386		    IPFW_TLV_STATE_NAME);
387		if (ntlv == NULL)
388			return (EINVAL);
389		name = ntlv->name;
390	} else
391		name = default_state_name;
392
393	ni = CHAIN_TO_SRV(ch);
394	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
395	obj->no.name = obj->name;
396	obj->no.etlv = IPFW_TLV_STATE_NAME;
397	strlcpy(obj->name, name, sizeof(obj->name));
398
399	IPFW_UH_WLOCK(ch);
400	no = ipfw_objhash_lookup_name_type(ni, 0,
401	    IPFW_TLV_STATE_NAME, name);
402	if (no != NULL) {
403		/*
404		 * Object is already created.
405		 * Just return its kidx and bump refcount.
406		 */
407		*pkidx = no->kidx;
408		no->refcnt++;
409		IPFW_UH_WUNLOCK(ch);
410		free(obj, M_IPFW);
411		DYN_DEBUG("\tfound kidx %d", *pkidx);
412		return (0);
413	}
414	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
415		DYN_DEBUG("\talloc_idx failed for %s", name);
416		IPFW_UH_WUNLOCK(ch);
417		free(obj, M_IPFW);
418		return (ENOSPC);
419	}
420	ipfw_objhash_add(ni, &obj->no);
421	IPFW_WLOCK(ch);
422	SRV_OBJECT(ch, obj->no.kidx) = obj;
423	IPFW_WUNLOCK(ch);
424	obj->no.refcnt++;
425	*pkidx = obj->no.kidx;
426	IPFW_UH_WUNLOCK(ch);
427	DYN_DEBUG("\tcreated kidx %d", *pkidx);
428	return (0);
429}
430
431static void
432dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
433{
434	struct dyn_state_obj *obj;
435
436	IPFW_UH_WLOCK_ASSERT(ch);
437
438	KASSERT(no->refcnt == 1,
439	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
440	    no->name, no->etlv, no->kidx, no->refcnt));
441
442	DYN_DEBUG("kidx %d", no->kidx);
443	IPFW_WLOCK(ch);
444	obj = SRV_OBJECT(ch, no->kidx);
445	SRV_OBJECT(ch, no->kidx) = NULL;
446	IPFW_WUNLOCK(ch);
447	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
448	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);
449
450	free(obj, M_IPFW);
451}
452
453static struct opcode_obj_rewrite dyn_opcodes[] = {
454	{
455		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
456		dyn_classify, dyn_update,
457		dyn_findbyname, dyn_findbykidx,
458		dyn_create, dyn_destroy
459	},
460	{
461		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
462		dyn_classify, dyn_update,
463		dyn_findbyname, dyn_findbykidx,
464		dyn_create, dyn_destroy
465	},
466	{
467		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
468		dyn_classify, dyn_update,
469		dyn_findbyname, dyn_findbykidx,
470		dyn_create, dyn_destroy
471	},
472	{
473		O_LIMIT, IPFW_TLV_STATE_NAME,
474		dyn_classify, dyn_update,
475		dyn_findbyname, dyn_findbykidx,
476		dyn_create, dyn_destroy
477	},
478};
479/**
480 * Print customizable flow id description via log(9) facility.
481 */
482static void
483print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
484    char *prefix, char *postfix)
485{
486	struct in_addr da;
487#ifdef INET6
488	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
489#else
490	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
491#endif
492
493#ifdef INET6
494	if (IS_IP6_FLOW_ID(id)) {
495		ip6_sprintf(src, &id->src_ip6);
496		ip6_sprintf(dst, &id->dst_ip6);
497	} else
498#endif
499	{
500		da.s_addr = htonl(id->src_ip);
501		inet_ntop(AF_INET, &da, src, sizeof(src));
502		da.s_addr = htonl(id->dst_ip);
503		inet_ntop(AF_INET, &da, dst, sizeof(dst));
504	}
505	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
506	    prefix, dyn_type, src, id->src_port, dst,
507	    id->dst_port, DYN_COUNT, postfix);
508}
509
510#define	print_dyn_rule(id, dtype, prefix, postfix)	\
511	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
512
513#define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
514#define TIME_LE(a,b)       ((int)((a)-(b)) < 0)
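
/*
 * Editor's note: the casts above implement the usual serial-number style
 * comparison -- the difference is computed first and then interpreted as a
 * signed int, so the test stays meaningful even if the compared values wrap
 * around the 32-bit range.
 */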
515
516static void
517dyn_update_proto_state(ipfw_dyn_rule *q, const struct ipfw_flow_id *id,
518    const struct tcphdr *tcp, int dir)
519{
520	uint32_t ack;
521	u_char flags;
522
523	if (id->proto == IPPROTO_TCP) {
524		flags = id->_flags & (TH_FIN | TH_SYN | TH_RST);
525#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
526#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
527#define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
528#define	ACK_FWD		0x10000			/* fwd ack seen */
529#define	ACK_REV		0x20000			/* rev ack seen */
530
531		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
532		switch (q->state & TCP_FLAGS) {
533		case TH_SYN:			/* opening */
534			q->expire = time_uptime + V_dyn_syn_lifetime;
535			break;
536
537		case BOTH_SYN:			/* move to established */
538		case BOTH_SYN | TH_FIN:		/* one side tries to close */
539		case BOTH_SYN | (TH_FIN << 8):
540#define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
541			if (tcp == NULL)
542				break;
543
544			ack = ntohl(tcp->th_ack);
545			if (dir == MATCH_FORWARD) {
546				if (q->ack_fwd == 0 ||
547				    _SEQ_GE(ack, q->ack_fwd)) {
548					q->ack_fwd = ack;
549					q->state |= ACK_FWD;
550				}
551			} else {
552				if (q->ack_rev == 0 ||
553				    _SEQ_GE(ack, q->ack_rev)) {
554					q->ack_rev = ack;
555					q->state |= ACK_REV;
556				}
557			}
558			if ((q->state & (ACK_FWD | ACK_REV)) ==
559			    (ACK_FWD | ACK_REV)) {
560				q->expire = time_uptime + V_dyn_ack_lifetime;
561				q->state &= ~(ACK_FWD | ACK_REV);
562			}
563			break;
564
565		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
566			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
567				V_dyn_fin_lifetime =
568				    V_dyn_keepalive_period - 1;
569			q->expire = time_uptime + V_dyn_fin_lifetime;
570			break;
571
572		default:
573#if 0
574			/*
575			 * reset or some invalid combination, but can also
576			 * occur if we use keep-state the wrong way.
577			 */
578			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
579				printf("invalid state: 0x%x\n", q->state);
580#endif
581			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
582				V_dyn_rst_lifetime =
583				    V_dyn_keepalive_period - 1;
584			q->expire = time_uptime + V_dyn_rst_lifetime;
585			break;
586		}
587	} else if (id->proto == IPPROTO_UDP) {
588		q->expire = time_uptime + V_dyn_udp_lifetime;
589	} else {
590		/* other protocols */
591		q->expire = time_uptime + V_dyn_short_lifetime;
592	}
593}
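
/*
 * Editor's summary of the transitions above (not part of the original
 * source): a lone SYN keeps dyn_syn_lifetime; once SYNs have been seen in
 * both directions the state is considered established and, after ACKs have
 * been observed both ways, gets dyn_ack_lifetime; FINs in both directions
 * demote it to dyn_fin_lifetime, while RST or unexpected flag combinations
 * demote it to dyn_rst_lifetime. UDP and other protocols simply use
 * dyn_udp_lifetime and dyn_short_lifetime, respectively.
 */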
594
595/*
596 * Lookup a dynamic rule, locked version.
597 */
598static ipfw_dyn_rule *
599lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
600    struct tcphdr *tcp, uint16_t kidx)
601{
602	/*
603	 * Stateful ipfw extensions.
604	 * Lookup into dynamic session queue.
605	 */
606	ipfw_dyn_rule *prev, *q = NULL;
607	int dir;
608
609	IPFW_BUCK_ASSERT(i);
610
611	dir = MATCH_NONE;
612	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
613		if (q->dyn_type == O_LIMIT_PARENT)
614			continue;
615
616		if (pkt->proto != q->id.proto)
617			continue;
618
619		if (kidx != 0 && kidx != q->kidx)
620			continue;
621
622		if (IS_IP6_FLOW_ID(pkt)) {
623			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
624			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
625			    pkt->src_port == q->id.src_port &&
626			    pkt->dst_port == q->id.dst_port) {
627				dir = MATCH_FORWARD;
628				break;
629			}
630			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
631			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
632			    pkt->src_port == q->id.dst_port &&
633			    pkt->dst_port == q->id.src_port) {
634				dir = MATCH_REVERSE;
635				break;
636			}
637		} else {
638			if (pkt->src_ip == q->id.src_ip &&
639			    pkt->dst_ip == q->id.dst_ip &&
640			    pkt->src_port == q->id.src_port &&
641			    pkt->dst_port == q->id.dst_port) {
642				dir = MATCH_FORWARD;
643				break;
644			}
645			if (pkt->src_ip == q->id.dst_ip &&
646			    pkt->dst_ip == q->id.src_ip &&
647			    pkt->src_port == q->id.dst_port &&
648			    pkt->dst_port == q->id.src_port) {
649				dir = MATCH_REVERSE;
650				break;
651			}
652		}
653	}
654	if (q == NULL)
655		goto done;	/* q = NULL, not found */
656
657	if (prev != NULL) {	/* found and not in front */
658		prev->next = q->next;
659		q->next = V_ipfw_dyn_v[i].head;
660		V_ipfw_dyn_v[i].head = q;
661	}
662
663	/* update state according to flags */
664	dyn_update_proto_state(q, pkt, tcp, dir);
665done:
666	if (match_direction != NULL)
667		*match_direction = dir;
668	return (q);
669}
670
671ipfw_dyn_rule *
672ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
673    struct tcphdr *tcp, uint16_t kidx)
674{
675	ipfw_dyn_rule *q;
676	int i;
677
678	i = hash_packet(pkt, V_curr_dyn_buckets);
679
680	IPFW_BUCK_LOCK(i);
681	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp, kidx);
682	if (q == NULL)
683		IPFW_BUCK_UNLOCK(i);
684	/* NB: return with the bucket locked when q is not NULL */
685	return q;
686}
687
688/*
689 * Unlock bucket mtx
690 * @q - pointer to dynamic rule
691 */
692void
693ipfw_dyn_unlock(ipfw_dyn_rule *q)
694{
695
696	IPFW_BUCK_UNLOCK(q->bucket);
697}
698
699static int
700resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
701{
702	int i, k, nbuckets_old;
703	ipfw_dyn_rule *q;
704	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
705
706	/* Check if given number is a power of 2 and at most 64k */
707	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
708		return 1;
709
710	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
711	    V_curr_dyn_buckets, nbuckets);
712
713	/* Allocate and initialize new hash */
714	dyn_v = malloc(nbuckets * sizeof(*dyn_v), M_IPFW,
715	    M_WAITOK | M_ZERO);
716
717	for (i = 0 ; i < nbuckets; i++)
718		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
719
720	/*
721	 * Take the upper half lock, as get_map() does, to ease
722	 * read-only access to the dynamic rules hash from sysctl
723	 */
724	IPFW_UH_WLOCK(chain);
725
726	/*
727	 * Acquire chain write lock to permit hash access
728	 * for main traffic path without additional locks
729	 */
730	IPFW_WLOCK(chain);
731
732	/* Save old values */
733	nbuckets_old = V_curr_dyn_buckets;
734	dyn_v_old = V_ipfw_dyn_v;
735
736	/* Skip relinking if array is not set up */
737	if (V_ipfw_dyn_v == NULL)
738		V_curr_dyn_buckets = 0;
739
740	/* Re-link all dynamic states */
741	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
742		while (V_ipfw_dyn_v[i].head != NULL) {
743			/* Remove from current chain */
744			q = V_ipfw_dyn_v[i].head;
745			V_ipfw_dyn_v[i].head = q->next;
746
747			/* Get new hash value */
748			k = hash_packet(&q->id, nbuckets);
749			q->bucket = k;
750			/* Add to the new head */
751			q->next = dyn_v[k].head;
752			dyn_v[k].head = q;
753		}
754	}
755
756	/* Update current pointers/buckets values */
757	V_curr_dyn_buckets = nbuckets;
758	V_ipfw_dyn_v = dyn_v;
759
760	IPFW_WUNLOCK(chain);
761
762	IPFW_UH_WUNLOCK(chain);
763
764	/* Start periodic callout on initial creation */
765	if (dyn_v_old == NULL) {
766        	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
767		return (0);
768	}
769
770	/* Destroy all mutexes */
771	for (i = 0 ; i < nbuckets_old ; i++)
772		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
773
774	/* Free old hash */
775	free(dyn_v_old, M_IPFW);
776
777	return 0;
778}
779
780/**
781 * Install a state of type 'dyn_type' for a dynamic session.
782 * The hash table contains three types of rules:
783 * - regular rules (O_KEEP_STATE)
784 * - rules for sessions with a limited number of sessions per user
785 *   (O_LIMIT). When they are created, the parent's count is
786 *   increased by 1, and decreased on delete. In this case,
787 *   the 'rule' argument is the parent rule and not the chain.
788 * - "parent" rules for the above (O_LIMIT_PARENT).
789 */
790static ipfw_dyn_rule *
791add_dyn_rule(struct ipfw_flow_id *id, int i, uint8_t dyn_type,
792    struct ip_fw *rule, uint16_t kidx)
793{
794	ipfw_dyn_rule *r;
795
796	IPFW_BUCK_ASSERT(i);
797
798	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
799	if (r == NULL) {
800		if (last_log != time_uptime) {
801			last_log = time_uptime;
802			log(LOG_DEBUG,
803			    "ipfw: Cannot allocate dynamic state, "
804			    "consider increasing net.inet.ip.fw.dyn_max\n");
805		}
806		return NULL;
807	}
808	ipfw_dyn_count++;
809
810	/*
811	 * refcount on parent is already incremented, so
812	 * it is safe to use parent unlocked.
813	 */
814	if (dyn_type == O_LIMIT) {
815		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
816		if ( parent->dyn_type != O_LIMIT_PARENT)
817			panic("invalid parent");
818		r->parent = parent;
819		rule = parent->rule;
820	}
821
822	r->id = *id;
823	r->expire = time_uptime + V_dyn_syn_lifetime;
824	r->rule = rule;
825	r->dyn_type = dyn_type;
826	IPFW_ZERO_DYN_COUNTER(r);
827	r->count = 0;
828	r->kidx = kidx;
829	r->bucket = i;
830	r->next = V_ipfw_dyn_v[i].head;
831	V_ipfw_dyn_v[i].head = r;
832	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
833	return r;
834}
835
836/**
837 * lookup dynamic parent rule using pkt and rule as search keys.
838 * If the lookup fails, then install one.
839 */
840static ipfw_dyn_rule *
841lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule,
842    uint16_t kidx)
843{
844	ipfw_dyn_rule *q;
845	int i, is_v6;
846
847	is_v6 = IS_IP6_FLOW_ID(pkt);
848	i = hash_packet( pkt, V_curr_dyn_buckets );
849	*pindex = i;
850	IPFW_BUCK_LOCK(i);
851	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
852		if (q->dyn_type == O_LIMIT_PARENT &&
853		    kidx == q->kidx &&
854		    rule == q->rule &&
855		    pkt->proto == q->id.proto &&
856		    pkt->src_port == q->id.src_port &&
857		    pkt->dst_port == q->id.dst_port &&
858		    (
859			(is_v6 &&
860			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
861				&(q->id.src_ip6)) &&
862			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
863				&(q->id.dst_ip6))) ||
864			(!is_v6 &&
865			 pkt->src_ip == q->id.src_ip &&
866			 pkt->dst_ip == q->id.dst_ip)
867		    )
868		) {
869			q->expire = time_uptime + V_dyn_short_lifetime;
870			DEB(print_dyn_rule(pkt, q->dyn_type,
871			    "lookup_dyn_parent found", "");)
872			return q;
873		}
874
875	/* Add virtual limiting rule */
876	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule, kidx);
877}
878
879/**
880 * Install dynamic state for rule type cmd->o.opcode
881 *
882 * Returns 1 (failure) if state is not installed because of errors or because
883 * session limitations are enforced.
884 */
885int
886ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
887    ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg)
888{
889	ipfw_dyn_rule *q;
890	int i;
891
892	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state",
893	    (cmd->o.arg1 == 0 ? "": DYN_STATE_OBJ(chain, &cmd->o)->name));)
894
895	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
896
897	IPFW_BUCK_LOCK(i);
898
899	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL, cmd->o.arg1);
900	if (q != NULL) {	/* should never occur */
901		DEB(
902		if (last_log != time_uptime) {
903			last_log = time_uptime;
904			printf("ipfw: %s: entry already present, done\n",
905			    __func__);
906		})
907		IPFW_BUCK_UNLOCK(i);
908		return (0);
909	}
910
911	/*
912	 * State limiting is done via uma(9) zone limiting.
913	 * Save pointer to newly-installed rule and reject
914	 * packet if add_dyn_rule() returned NULL.
915	 * Note q is currently set to NULL.
916	 */
917
918	switch (cmd->o.opcode) {
919	case O_KEEP_STATE:	/* bidir rule */
920		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule,
921		    cmd->o.arg1);
922		break;
923
924	case O_LIMIT: {		/* limit number of sessions */
925		struct ipfw_flow_id id;
926		ipfw_dyn_rule *parent;
927		uint32_t conn_limit;
928		uint16_t limit_mask = cmd->limit_mask;
929		int pindex;
930
931		conn_limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
932
933		DEB(
934		if (cmd->conn_limit == IP_FW_TARG)
935			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
936			    "(tablearg)\n", __func__, conn_limit);
937		else
938			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
939			    __func__, conn_limit);
940		)
941
942		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
943		id.proto = args->f_id.proto;
944		id.addr_type = args->f_id.addr_type;
945		id.fib = M_GETFIB(args->m);
946
947		if (IS_IP6_FLOW_ID (&(args->f_id))) {
948			bzero(&id.src_ip6, sizeof(id.src_ip6));
949			bzero(&id.dst_ip6, sizeof(id.dst_ip6));
950
951			if (limit_mask & DYN_SRC_ADDR)
952				id.src_ip6 = args->f_id.src_ip6;
953			if (limit_mask & DYN_DST_ADDR)
954				id.dst_ip6 = args->f_id.dst_ip6;
955		} else {
956			if (limit_mask & DYN_SRC_ADDR)
957				id.src_ip = args->f_id.src_ip;
958			if (limit_mask & DYN_DST_ADDR)
959				id.dst_ip = args->f_id.dst_ip;
960		}
961		if (limit_mask & DYN_SRC_PORT)
962			id.src_port = args->f_id.src_port;
963		if (limit_mask & DYN_DST_PORT)
964			id.dst_port = args->f_id.dst_port;
965
966		/*
967		 * We have to release lock for previous bucket to
968		 * avoid possible deadlock
969		 */
970		IPFW_BUCK_UNLOCK(i);
971
972		parent = lookup_dyn_parent(&id, &pindex, rule, cmd->o.arg1);
973		if (parent == NULL) {
974			printf("ipfw: %s: add parent failed\n", __func__);
975			IPFW_BUCK_UNLOCK(pindex);
976			return (1);
977		}
978
979		if (parent->count >= conn_limit) {
980			if (V_fw_verbose && last_log != time_uptime) {
981				char sbuf[24];
982
983				last_log = time_uptime;
984				snprintf(sbuf, sizeof(sbuf),
985				    "%d drop session",
986				    parent->rule->rulenum);
987				print_dyn_rule_flags(&args->f_id,
988				    cmd->o.opcode,
989				    LOG_SECURITY | LOG_DEBUG,
990				    sbuf, "too many entries");
991			}
992			IPFW_BUCK_UNLOCK(pindex);
993			return (1);
994		}
995		/* Increment counter on parent */
996		parent->count++;
997		IPFW_BUCK_UNLOCK(pindex);
998
999		IPFW_BUCK_LOCK(i);
1000		q = add_dyn_rule(&args->f_id, i, O_LIMIT,
1001		    (struct ip_fw *)parent, cmd->o.arg1);
1002		if (q == NULL) {
1003			/* Decrement parent counter and notify caller */
1004			IPFW_BUCK_UNLOCK(i);
1005			IPFW_BUCK_LOCK(pindex);
1006			parent->count--;
1007			IPFW_BUCK_UNLOCK(pindex);
1008			return (1);
1009		}
1010		break;
1011	}
1012	default:
1013		printf("ipfw: %s: unknown dynamic rule type %u\n",
1014		    __func__, cmd->o.opcode);
1015	}
1016
1017	if (q == NULL) {
1018		IPFW_BUCK_UNLOCK(i);
1019		return (1);	/* Notify caller about failure */
1020	}
1021
1022	dyn_update_proto_state(q, &args->f_id, NULL, MATCH_FORWARD);
1023	IPFW_BUCK_UNLOCK(i);
1024	return (0);
1025}
1026
1027/*
1028 * Generate a TCP packet, containing either a RST or a keepalive.
1029 * When flags & TH_RST, we are sending a RST packet because a
1030 * "reset" action matched the packet. Otherwise we are sending a
1031 * keepalive, and the TH_SYN bit in flags selects the addressing direction.
1032 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
1033 * so that MAC can label the reply appropriately.
1034 */
1035struct mbuf *
1036ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
1037    u_int32_t ack, int flags)
1038{
1039	struct mbuf *m = NULL;		/* stupid compiler */
1040	int len, dir;
1041	struct ip *h = NULL;		/* stupid compiler */
1042#ifdef INET6
1043	struct ip6_hdr *h6 = NULL;
1044#endif
1045	struct tcphdr *th = NULL;
1046
1047	MGETHDR(m, M_NOWAIT, MT_DATA);
1048	if (m == NULL)
1049		return (NULL);
1050
1051	M_SETFIB(m, id->fib);
1052#ifdef MAC
1053	if (replyto != NULL)
1054		mac_netinet_firewall_reply(replyto, m);
1055	else
1056		mac_netinet_firewall_send(m);
1057#else
1058	(void)replyto;		/* don't warn about unused arg */
1059#endif
1060
1061	switch (id->addr_type) {
1062	case 4:
1063		len = sizeof(struct ip) + sizeof(struct tcphdr);
1064		break;
1065#ifdef INET6
1066	case 6:
1067		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1068		break;
1069#endif
1070	default:
1071		/* XXX: log me?!? */
1072		FREE_PKT(m);
1073		return (NULL);
1074	}
1075	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
1076
1077	m->m_data += max_linkhdr;
1078	m->m_flags |= M_SKIP_FIREWALL;
1079	m->m_pkthdr.len = m->m_len = len;
1080	m->m_pkthdr.rcvif = NULL;
1081	bzero(m->m_data, len);
1082
1083	switch (id->addr_type) {
1084	case 4:
1085		h = mtod(m, struct ip *);
1086
1087		/* prepare for checksum */
1088		h->ip_p = IPPROTO_TCP;
1089		h->ip_len = htons(sizeof(struct tcphdr));
1090		if (dir) {
1091			h->ip_src.s_addr = htonl(id->src_ip);
1092			h->ip_dst.s_addr = htonl(id->dst_ip);
1093		} else {
1094			h->ip_src.s_addr = htonl(id->dst_ip);
1095			h->ip_dst.s_addr = htonl(id->src_ip);
1096		}
1097
1098		th = (struct tcphdr *)(h + 1);
1099		break;
1100#ifdef INET6
1101	case 6:
1102		h6 = mtod(m, struct ip6_hdr *);
1103
1104		/* prepare for checksum */
1105		h6->ip6_nxt = IPPROTO_TCP;
1106		h6->ip6_plen = htons(sizeof(struct tcphdr));
1107		if (dir) {
1108			h6->ip6_src = id->src_ip6;
1109			h6->ip6_dst = id->dst_ip6;
1110		} else {
1111			h6->ip6_src = id->dst_ip6;
1112			h6->ip6_dst = id->src_ip6;
1113		}
1114
1115		th = (struct tcphdr *)(h6 + 1);
1116		break;
1117#endif
1118	}
1119
1120	if (dir) {
1121		th->th_sport = htons(id->src_port);
1122		th->th_dport = htons(id->dst_port);
1123	} else {
1124		th->th_sport = htons(id->dst_port);
1125		th->th_dport = htons(id->src_port);
1126	}
1127	th->th_off = sizeof(struct tcphdr) >> 2;
1128
1129	if (flags & TH_RST) {
1130		if (flags & TH_ACK) {
1131			th->th_seq = htonl(ack);
1132			th->th_flags = TH_RST;
1133		} else {
1134			if (flags & TH_SYN)
1135				seq++;
1136			th->th_ack = htonl(seq);
1137			th->th_flags = TH_RST | TH_ACK;
1138		}
1139	} else {
1140		/*
1141		 * Keepalive - use caller provided sequence numbers
1142		 */
1143		th->th_seq = htonl(seq);
1144		th->th_ack = htonl(ack);
1145		th->th_flags = TH_ACK;
1146	}
1147
1148	switch (id->addr_type) {
1149	case 4:
1150		th->th_sum = in_cksum(m, len);
1151
1152		/* finish the ip header */
1153		h->ip_v = 4;
1154		h->ip_hl = sizeof(*h) >> 2;
1155		h->ip_tos = IPTOS_LOWDELAY;
1156		h->ip_off = htons(0);
1157		h->ip_len = htons(len);
1158		h->ip_ttl = V_ip_defttl;
1159		h->ip_sum = 0;
1160		break;
1161#ifdef INET6
1162	case 6:
1163		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
1164		    sizeof(struct tcphdr));
1165
1166		/* finish the ip6 header */
1167		h6->ip6_vfc |= IPV6_VERSION;
1168		h6->ip6_hlim = IPV6_DEFHLIM;
1169		break;
1170#endif
1171	}
1172
1173	return (m);
1174}
1175
1176/*
1177 * Queue keepalive packets for given dynamic rule
1178 */
1179static struct mbuf **
1180ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
1181{
1182	struct mbuf *m_rev, *m_fwd;
1183
1184	m_rev = (q->state & ACK_REV) ? NULL :
1185	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
1186	m_fwd = (q->state & ACK_FWD) ? NULL :
1187	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
1188
1189	if (m_rev != NULL) {
1190		*mtailp = m_rev;
1191		mtailp = &(*mtailp)->m_nextpkt;
1192	}
1193	if (m_fwd != NULL) {
1194		*mtailp = m_fwd;
1195		mtailp = &(*mtailp)->m_nextpkt;
1196	}
1197
1198	return (mtailp);
1199}
1200
1201/*
1202 * This procedure is used to perform various maintenance
1203 * on dynamic hash list. Currently it is called every second.
1204 */
1205static void
1206ipfw_dyn_tick(void * vnetx)
1207{
1208	struct ip_fw_chain *chain;
1209	int check_ka = 0;
1210#ifdef VIMAGE
1211	struct vnet *vp = vnetx;
1212#endif
1213
1214	CURVNET_SET(vp);
1215
1216	chain = &V_layer3_chain;
1217
1218	/* Run keepalive checks every keepalive_period iff ka is enabled */
1219	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
1220	    (V_dyn_keepalive != 0)) {
1221		V_dyn_keepalive_last = time_uptime;
1222		check_ka = 1;
1223	}
1224
1225	check_dyn_rules(chain, NULL, check_ka, 1);
1226
1227	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
1228
1229	CURVNET_RESTORE();
1230}
1231
1232
1233/*
1234 * Walk through all dynamic states doing generic maintenance:
1235 * 1) free expired states
1236 * 2) free all states based on deleted rule / set
1237 * 3) send keepalives for states if needed
1238 *
1239 * @chain - pointer to current ipfw rules chain
1240 * @rt - delete all states originated by rules matching the given
1241 *       range if != NULL
1242 * @check_ka - perform checking/sending keepalives
1243 * @timer - indicate call from timer routine.
1244 *
1245 * Timer routine must call this function unlocked to permit
1246 * sending keepalives/resizing table.
1247 *
1248 * Others have to call the function with IPFW_UH_WLOCK held.
1249 * Additionally, the function assumes that the dynamic rule/set is
1250 * ALREADY deleted so no new states can be generated by
1251 * 'deleted' rules.
1252 *
1253 * Write lock is needed to ensure that unused parent rules
1254 * are not freed by another instance (see stages 2 and 3).
1255 */
1256static void
1257check_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt,
1258    int check_ka, int timer)
1259{
1260	struct mbuf *m0, *m, *mnext, **mtailp;
1261	struct ip *h;
1262	int i, dyn_count, new_buckets = 0, max_buckets;
1263	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1264	ipfw_dyn_rule *q, *q_prev, *q_next;
1265	ipfw_dyn_rule *exp_head, **exptailp;
1266	ipfw_dyn_rule *exp_lhead, **expltailp;
1267
1268	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1269	    __func__));
1270
1271	/* Avoid possible LOR */
1272	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1273	    __func__));
1274
1275	/*
1276	 * Do not perform any checks if we currently have no dynamic states
1277	 */
1278	if (DYN_COUNT == 0)
1279		return;
1280
1281	/* Expired states */
1282	exp_head = NULL;
1283	exptailp = &exp_head;
1284
1285	/* Expired limit states */
1286	exp_lhead = NULL;
1287	expltailp = &exp_lhead;
1288
1289	/*
1290	 * We make a chain of packets to go out here -- not deferring
1291	 * until after we drop the IPFW dynamic rule lock would result
1292	 * in a lock order reversal with the normal packet input -> ipfw
1293	 * call stack.
1294	 */
1295	m0 = NULL;
1296	mtailp = &m0;
1297
1298	/* Protect from hash resizing */
1299	if (timer != 0)
1300		IPFW_UH_WLOCK(chain);
1301	else
1302		IPFW_UH_WLOCK_ASSERT(chain);
1303
1304#define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1305
1306	/* Stage 1: perform requested deletion */
1307	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1308		IPFW_BUCK_LOCK(i);
1309		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1310			/* account every rule */
1311			total++;
1312
1313			/* Always skip parent rules */
1314			if (q->dyn_type == O_LIMIT_PARENT) {
1315				parents++;
1316				NEXT_RULE();
1317			}
1318
1319			/*
1320			 * Remove rules which are:
1321			 * 1) expired
1322			 * 2) matching the deletion range
1323			 */
1324			if ((TIME_LEQ(q->expire, time_uptime)) ||
1325			    (rt != NULL && ipfw_match_range(q->rule, rt))) {
1326				if (TIME_LE(time_uptime, q->expire) &&
1327				    q->dyn_type == O_KEEP_STATE &&
1328				    V_dyn_keep_states != 0) {
1329					/*
1330					 * Do not delete state if
1331					 * it is not expired and
1332					 * dyn_keep_states is ON.
1333					 * However we need to re-link it
1334					 * to any other stable rule
1335					 */
1336					q->rule = chain->default_rule;
1337					NEXT_RULE();
1338				}
1339
1340				/* Unlink q from current list */
1341				q_next = q->next;
1342				if (q == V_ipfw_dyn_v[i].head)
1343					V_ipfw_dyn_v[i].head = q_next;
1344				else
1345					q_prev->next = q_next;
1346
1347				q->next = NULL;
1348
1349				/* queue q to expire list */
1350				if (q->dyn_type != O_LIMIT) {
1351					*exptailp = q;
1352					exptailp = &(*exptailp)->next;
1353					DEB(print_dyn_rule(&q->id, q->dyn_type,
1354					    "unlink entry", "left");
1355					)
1356				} else {
1357					/* Separate list for limit rules */
1358					*expltailp = q;
1359					expltailp = &(*expltailp)->next;
1360					expired_limits++;
1361					DEB(print_dyn_rule(&q->id, q->dyn_type,
1362					    "unlink limit entry", "left");
1363					)
1364				}
1365
1366				q = q_next;
1367				expired++;
1368				continue;
1369			}
1370
1371			/*
1372			 * Check if we need to send keepalive:
1373			 * we need to ensure it is time to do KA,
1374			 * this is an established TCP session, and
1375			 * the expire time is within the keepalive interval
1376			 */
1377			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1378			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1379			    (TIME_LEQ(q->expire, time_uptime +
1380			      V_dyn_keepalive_interval)))
1381				mtailp = ipfw_dyn_send_ka(mtailp, q);
1382
1383			NEXT_RULE();
1384		}
1385		IPFW_BUCK_UNLOCK(i);
1386	}
1387
1388	/* Stage 2: decrement counters from O_LIMIT parents */
1389	if (expired_limits != 0) {
1390		/*
1391		 * XXX: Note that deleting a set with more than one
1392		 * heavily-used LIMIT rule can result in overwhelming
1393		 * locking due to lack of per-hash value sorting
1394		 *
1395		 * We should probably think about:
1396		 * 1) pre-allocating hash of size, say,
1397		 * MAX(16, V_curr_dyn_buckets / 1024)
1398		 * 2) checking if expired_limits is large enough
1399		 * 3) If yes, init hash (or its part), re-link
1400		 * current list and start decrementing procedure in
1401		 * each bucket separately
1402		 */
1403
1404		/*
1405		 * Small optimization: do not unlock bucket until
1406		 * we see the next item resides in different bucket
1407		 */
1408		if (exp_lhead != NULL) {
1409			i = exp_lhead->parent->bucket;
1410			IPFW_BUCK_LOCK(i);
1411		}
1412		for (q = exp_lhead; q != NULL; q = q->next) {
1413			if (i != q->parent->bucket) {
1414				IPFW_BUCK_UNLOCK(i);
1415				i = q->parent->bucket;
1416				IPFW_BUCK_LOCK(i);
1417			}
1418
1419			/* Decrease parent refcount */
1420			q->parent->count--;
1421		}
1422		if (exp_lhead != NULL)
1423			IPFW_BUCK_UNLOCK(i);
1424	}
1425
1426	/*
1427	 * We protect ourselves from unused parent deletion
1428	 * (from the timer function) by holding UH write lock.
1429	 */
1430
1431	/* Stage 3: remove unused parent rules */
1432	if ((parents != 0) && (expired != 0)) {
1433		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1434			IPFW_BUCK_LOCK(i);
1435			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1436				if (q->dyn_type != O_LIMIT_PARENT)
1437					NEXT_RULE();
1438
1439				if (q->count != 0)
1440					NEXT_RULE();
1441
1442				/* Parent rule without consumers */
1443
1444				/* Unlink q from current list */
1445				q_next = q->next;
1446				if (q == V_ipfw_dyn_v[i].head)
1447					V_ipfw_dyn_v[i].head = q_next;
1448				else
1449					q_prev->next = q_next;
1450
1451				q->next = NULL;
1452
1453				/* Add to expired list */
1454				*exptailp = q;
1455				exptailp = &(*exptailp)->next;
1456
1457				DEB(print_dyn_rule(&q->id, q->dyn_type,
1458				    "unlink parent entry", "left");
1459				)
1460
1461				expired++;
1462
1463				q = q_next;
1464			}
1465			IPFW_BUCK_UNLOCK(i);
1466		}
1467	}
1468
1469#undef NEXT_RULE
1470
1471	if (timer != 0) {
1472		/*
1473		 * Check if we need to resize hash:
1474		 * if the current number of states exceeds the number of buckets,
1475		 * grow the hash size to the minimum power of 2 which is bigger
1476		 * than the current state count. Limit hash size to 64k.
1477		 */
1478		max_buckets = (V_dyn_buckets_max > 65536) ?
1479		    65536 : V_dyn_buckets_max;
1480
1481		dyn_count = DYN_COUNT;
1482
1483		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1484		    (dyn_count < max_buckets)) {
1485			new_buckets = V_curr_dyn_buckets;
1486			while (new_buckets < dyn_count) {
1487				new_buckets *= 2;
1488
1489				if (new_buckets >= max_buckets)
1490					break;
1491			}
1492		}
1493
1494		IPFW_UH_WUNLOCK(chain);
1495	}
1496
1497	/* Finally delete old states and limits, if any */
1498	for (q = exp_head; q != NULL; q = q_next) {
1499		q_next = q->next;
1500		uma_zfree(V_ipfw_dyn_rule_zone, q);
1501		ipfw_dyn_count--;
1502	}
1503
1504	for (q = exp_lhead; q != NULL; q = q_next) {
1505		q_next = q->next;
1506		uma_zfree(V_ipfw_dyn_rule_zone, q);
1507		ipfw_dyn_count--;
1508	}
1509
1510	/*
1511 * The rest of the code MUST be called from the timer routine only,
1512 * without holding any locks
1513	 */
1514	if (timer == 0)
1515		return;
1516
1517	/* Send keepalive packets if any */
1518	for (m = m0; m != NULL; m = mnext) {
1519		mnext = m->m_nextpkt;
1520		m->m_nextpkt = NULL;
1521		h = mtod(m, struct ip *);
1522		if (h->ip_v == 4)
1523			ip_output(m, NULL, NULL, 0, NULL, NULL);
1524#ifdef INET6
1525		else
1526			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1527#endif
1528	}
1529
1530	/* Run table resize without holding any locks */
1531	if (new_buckets != 0)
1532		resize_dynamic_table(chain, new_buckets);
1533}
1534
1535/*
1536 * Deletes all dynamic rules originated by a given rule or by all rules
1537 * in a given set, as described by the passed range.
1538 * @chain - pointer to current ipfw rules chain
1539 * @rt - delete all states originated by rules in the matched range.
1540 *
1541 * Function has to be called with IPFW_UH_WLOCK held.
1542 * Additionally, the function assumes that the dynamic rule/set is
1543 * ALREADY deleted so no new states can be generated by
1544 * 'deleted' rules.
1545 */
1546void
1547ipfw_expire_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1548{
1549
1550	check_dyn_rules(chain, rt, 0, 0);
1551}
1552
1553/*
1554 * Check if rule contains at least one dynamic opcode.
1555 *
1556 * Returns 1 if such opcode is found, 0 otherwise.
1557 */
1558int
1559ipfw_is_dyn_rule(struct ip_fw *rule)
1560{
1561	int cmdlen, l;
1562	ipfw_insn *cmd;
1563
1564	l = rule->cmd_len;
1565	cmd = rule->cmd;
1566	cmdlen = 0;
1567	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1568		cmdlen = F_LEN(cmd);
1569
1570		switch (cmd->opcode) {
1571		case O_LIMIT:
1572		case O_KEEP_STATE:
1573		case O_PROBE_STATE:
1574		case O_CHECK_STATE:
1575			return (1);
1576		}
1577	}
1578
1579	return (0);
1580}
1581
1582void
1583ipfw_dyn_init(struct ip_fw_chain *chain)
1584{
1585
1586        V_ipfw_dyn_v = NULL;
1587        V_dyn_buckets_max = 256; /* must be power of 2 */
1588        V_curr_dyn_buckets = 256; /* must be power of 2 */
1589
1590        V_dyn_ack_lifetime = 300;
1591        V_dyn_syn_lifetime = 20;
1592        V_dyn_fin_lifetime = 1;
1593        V_dyn_rst_lifetime = 1;
1594        V_dyn_udp_lifetime = 10;
1595        V_dyn_short_lifetime = 5;
1596
1597        V_dyn_keepalive_interval = 20;
1598        V_dyn_keepalive_period = 5;
1599        V_dyn_keepalive = 1;    /* do send keepalives */
1600	V_dyn_keepalive_last = time_uptime;
1601
1602        V_dyn_max = 16384; /* max # of dynamic rules */
1603
1604	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1605	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1606	    UMA_ALIGN_PTR, 0);
1607
1608	/* Enforce limit on dynamic rules */
1609	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1610
1611        callout_init(&V_ipfw_timeout, 1);
1612
1613	/*
1614	 * This can potentially be done on first dynamic rule
1615	 * being added to chain.
1616	 */
1617	resize_dynamic_table(chain, V_curr_dyn_buckets);
1618	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1619}
1620
1621void
1622ipfw_dyn_uninit(int pass)
1623{
1624	int i;
1625
1626	if (pass == 0) {
1627		callout_drain(&V_ipfw_timeout);
1628		return;
1629	}
1630	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1631
1632	if (V_ipfw_dyn_v != NULL) {
1633		/*
1634		 * Skip deleting all dynamic states -
1635		 * uma_zdestroy() does this more efficiently;
1636		 */
1637
1638		/* Destroy all mutexes */
1639		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1640			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1641		free(V_ipfw_dyn_v, M_IPFW);
1642		V_ipfw_dyn_v = NULL;
1643	}
1644
1645        uma_zdestroy(V_ipfw_dyn_rule_zone);
1646}
1647
1648#ifdef SYSCTL_NODE
1649/*
1650 * Get/set maximum number of dynamic states in given VNET instance.
1651 */
1652static int
1653sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1654{
1655	int error;
1656	unsigned int nstates;
1657
1658	nstates = V_dyn_max;
1659
1660	error = sysctl_handle_int(oidp, &nstates, 0, req);
1661	/* Read operation or some error */
1662	if ((error != 0) || (req->newptr == NULL))
1663		return (error);
1664
1665	V_dyn_max = nstates;
1666	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1667
1668	return (0);
1669}
1670
1671/*
1672 * Get current number of dynamic states in given VNET instance.
1673 */
1674static int
1675sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1676{
1677	int error;
1678	unsigned int nstates;
1679
1680	nstates = DYN_COUNT;
1681
1682	error = sysctl_handle_int(oidp, &nstates, 0, req);
1683
1684	return (error);
1685}
1686#endif
1687
1688/*
1689 * Returns size of dynamic states in legacy format
1690 */
1691int
1692ipfw_dyn_len(void)
1693{
1694
1695	return (V_ipfw_dyn_v == NULL) ? 0 :
1696		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1697}
1698
1699/*
1700 * Returns number of dynamic states.
1701 * Used by dump format v1 (current).
1702 */
1703int
1704ipfw_dyn_get_count(void)
1705{
1706
1707	return (V_ipfw_dyn_v == NULL) ? 0 : DYN_COUNT;
1708}
1709
1710static void
1711export_dyn_rule(ipfw_dyn_rule *src, ipfw_dyn_rule *dst)
1712{
1713	uint16_t rulenum;
1714
1715	rulenum = (uint16_t)src->rule->rulenum;
1716	memcpy(dst, src, sizeof(*src));
1717	memcpy(&dst->rule, &rulenum, sizeof(rulenum));
1718	/*
1719	 * store set number into high word of
1720	 * dst->rule pointer.
1721	 */
1722	memcpy((char *)&dst->rule + sizeof(rulenum), &src->rule->set,
1723	    sizeof(src->rule->set));
1724	/*
1725	 * store a non-null value in "next".
1726	 * The userland code will interpret a
1727	 * NULL here as a marker
1728	 * for the last dynamic rule.
1729	 */
1730	memcpy(&dst->next, &dst, sizeof(dst));
1731	dst->expire = TIME_LEQ(dst->expire, time_uptime) ?  0:
1732	    dst->expire - time_uptime;
1733}
1734
1735/*
1736 * Fills in the buffer given by @sd with dynamic states.
1737 * Used by dump format v1 (current).
1738 *
1739 * Returns 0 on success.
1740 */
1741int
1742ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
1743{
1744	ipfw_dyn_rule *p;
1745	ipfw_obj_dyntlv *dst, *last;
1746	ipfw_obj_ctlv *ctlv;
1747	int i;
1748	size_t sz;
1749
1750	if (V_ipfw_dyn_v == NULL)
1751		return (0);
1752
1753	IPFW_UH_RLOCK_ASSERT(chain);
1754
1755	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1756	if (ctlv == NULL)
1757		return (ENOMEM);
1758	sz = sizeof(ipfw_obj_dyntlv);
1759	ctlv->head.type = IPFW_TLV_DYNSTATE_LIST;
1760	ctlv->objsize = sz;
1761	last = NULL;
1762
1763	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1764		IPFW_BUCK_LOCK(i);
1765		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1766			dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, sz);
1767			if (dst == NULL) {
1768				IPFW_BUCK_UNLOCK(i);
1769				return (ENOMEM);
1770			}
1771
1772			export_dyn_rule(p, &dst->state);
1773			dst->head.length = sz;
1774			dst->head.type = IPFW_TLV_DYN_ENT;
1775			last = dst;
1776		}
1777		IPFW_BUCK_UNLOCK(i);
1778	}
1779
1780	if (last != NULL) /* mark last dynamic rule */
1781		last->head.flags = IPFW_DF_LAST;
1782
1783	return (0);
1784}
1785
1786/*
1787 * Fill given buffer with dynamic states (legacy format).
1788 * IPFW_UH_RLOCK has to be held while calling.
1789 */
1790void
1791ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1792{
1793	ipfw_dyn_rule *p, *last = NULL;
1794	char *bp;
1795	int i;
1796
1797	if (V_ipfw_dyn_v == NULL)
1798		return;
1799	bp = *pbp;
1800
1801	IPFW_UH_RLOCK_ASSERT(chain);
1802
1803	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1804		IPFW_BUCK_LOCK(i);
1805		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1806			if (bp + sizeof *p <= ep) {
1807				ipfw_dyn_rule *dst =
1808					(ipfw_dyn_rule *)bp;
1809
1810				export_dyn_rule(p, dst);
1811				last = dst;
1812				bp += sizeof(ipfw_dyn_rule);
1813			}
1814		}
1815		IPFW_BUCK_UNLOCK(i);
1816	}
1817
1818	if (last != NULL) /* mark last dynamic rule */
1819		bzero(&last->next, sizeof(last));
1820	*pbp = bp;
1821}
1822/* end of file */
1823