/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netpfil/pf/pf.c 290669 2015-11-11 12:36:42Z kp $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
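
/*
 * Illustrative usage sketch (editor's addition, not from the original
 * source): the second argument must be a fully parenthesized printf()
 * argument list, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: dropped packet, reason %d\n", r));
 *
 * so that the macro expands to a plain printf() call guarded by the
 * per-vnet debug level.
 */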

/*
 * Global variables
 */

/* state tables */
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	union {
		struct route		ro;
		struct {
			int		type;
			int		code;
			int		mtu;
		} icmpopts;
	} u;
#define	pfse_ro		u.ro
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr  		addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule  		*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) <<	PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
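
/*
 * Editor's sketch of the resulting state-ID layout, derived from the
 * definitions above: with PFID_CPUBITS == 8, the top 8 bits of the
 * 64-bit ID carry the CPU number and the low 56 bits a per-CPU counter:
 *
 *	63            56 55                                           0
 *	+---------------+---------------------------------------------+
 *	|    CPU id     |            per-CPU state counter            |
 *	+---------------+---------------------------------------------+
 *
 * The CTASSERT above guarantees that every possible CPU number fits
 * into the PFID_CPUBITS field.
 */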
199
200static void		 pf_src_tree_remove_state(struct pf_state *);
201static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
202			    u_int32_t);
203static void		 pf_add_threshold(struct pf_threshold *);
204static int		 pf_check_threshold(struct pf_threshold *);
205
206static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
207			    u_int16_t *, u_int16_t *, struct pf_addr *,
208			    u_int16_t, u_int8_t, sa_family_t);
209static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
210			    struct tcphdr *, struct pf_state_peer *);
211static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
212			    struct pf_addr *, struct pf_addr *, u_int16_t,
213			    u_int16_t *, u_int16_t *, u_int16_t *,
214			    u_int16_t *, u_int8_t, sa_family_t);
215static void		 pf_send_tcp(struct mbuf *,
216			    const struct pf_rule *, sa_family_t,
217			    const struct pf_addr *, const struct pf_addr *,
218			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
219			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
220			    u_int16_t, struct ifnet *);
221static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
222			    sa_family_t, struct pf_rule *);
223static void		 pf_detach_state(struct pf_state *);
224static int		 pf_state_key_attach(struct pf_state_key *,
225			    struct pf_state_key *, struct pf_state *);
226static void		 pf_state_key_detach(struct pf_state *, int);
227static int		 pf_state_key_ctor(void *, int, void *, int);
228static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
229static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
230			    int, struct pfi_kif *, struct mbuf *, int,
231			    struct pf_pdesc *, struct pf_rule **,
232			    struct pf_ruleset **, struct inpcb *);
233static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
234			    struct pf_rule *, struct pf_pdesc *,
235			    struct pf_src_node *, struct pf_state_key *,
236			    struct pf_state_key *, struct mbuf *, int,
237			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
238			    struct pf_state **, int, u_int16_t, u_int16_t,
239			    int);
240static int		 pf_test_fragment(struct pf_rule **, int,
241			    struct pfi_kif *, struct mbuf *, void *,
242			    struct pf_pdesc *, struct pf_rule **,
243			    struct pf_ruleset **);
244static int		 pf_tcp_track_full(struct pf_state_peer *,
245			    struct pf_state_peer *, struct pf_state **,
246			    struct pfi_kif *, struct mbuf *, int,
247			    struct pf_pdesc *, u_short *, int *);
248static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
249			    struct pf_state_peer *, struct pf_state **,
250			    struct pf_pdesc *, u_short *);
251static int		 pf_test_state_tcp(struct pf_state **, int,
252			    struct pfi_kif *, struct mbuf *, int,
253			    void *, struct pf_pdesc *, u_short *);
254static int		 pf_test_state_udp(struct pf_state **, int,
255			    struct pfi_kif *, struct mbuf *, int,
256			    void *, struct pf_pdesc *);
257static int		 pf_test_state_icmp(struct pf_state **, int,
258			    struct pfi_kif *, struct mbuf *, int,
259			    void *, struct pf_pdesc *, u_short *);
260static int		 pf_test_state_other(struct pf_state **, int,
261			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
262static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
263			    sa_family_t);
264static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
265			    sa_family_t);
266static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
267				int, u_int16_t);
268static int		 pf_check_proto_cksum(struct mbuf *, int, int,
269			    u_int8_t, sa_family_t);
270static void		 pf_print_state_parts(struct pf_state *,
271			    struct pf_state_key *, struct pf_state_key *);
272static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
273			    struct pf_addr_wrap *);
274static struct pf_state	*pf_find_state(struct pfi_kif *,
275			    struct pf_state_key_cmp *, u_int);
276static int		 pf_src_connlimit(struct pf_state **);
277static void		 pf_overload_task(void *v, int pending);
278static int		 pf_insert_src_node(struct pf_src_node **,
279			    struct pf_rule *, struct pf_addr *, sa_family_t);
280static u_int		 pf_purge_expired_states(u_int, int);
281static void		 pf_purge_unlinked_rules(void);
282static int		 pf_mtag_uminit(void *, int, int);
283static void		 pf_mtag_free(struct m_tag *);
284#ifdef INET
285static void		 pf_route(struct mbuf **, struct pf_rule *, int,
286			    struct ifnet *, struct pf_state *,
287			    struct pf_pdesc *);
288#endif /* INET */
289#ifdef INET6
290static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
291			    struct pf_addr *, u_int8_t);
292static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
293			    struct ifnet *, struct pf_state *,
294			    struct pf_pdesc *);
295#endif /* INET6 */
296
297int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
298
299VNET_DECLARE(int, pf_end_threads);
300
301VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
302
303#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
304				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
305
306#define	STATE_LOOKUP(i, k, d, s, pd)					\
307	do {								\
308		(s) = pf_find_state((i), (k), (d));			\
309		if ((s) == NULL)					\
310			return (PF_DROP);				\
311		if (PACKET_LOOPED(pd))					\
312			return (PF_PASS);				\
313		if ((d) == PF_OUT &&					\
314		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
315		    (s)->rule.ptr->direction == PF_OUT) ||		\
316		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
317		    (s)->rule.ptr->direction == PF_IN)) &&		\
318		    (s)->rt_kif != NULL &&				\
319		    (s)->rt_kif != (i))					\
320			return (PF_PASS);				\
321	} while (0)
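
/*
 * Hedged usage sketch (editor's addition): the pf_test_state_*()
 * handlers declared above invoke STATE_LOOKUP() at their top roughly
 * like this, with kif, key, direction and pd supplied by the caller:
 *
 *	struct pf_state *s;
 *
 *	STATE_LOOKUP(kif, &key, direction, s, pd);
 *	// Reached only with a live, locked state; otherwise the macro
 *	// has already returned PF_DROP or PF_PASS on our behalf.
 */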

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)

int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	return (0);
}

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_aligned_32((uint32_t *)sk,
			       sizeof(struct pf_state_key_cmp),
			       V_pf_hashseed);

	return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_aligned_32((uint32_t *)&addr->v4,
				       sizeof(addr->v4), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_aligned_32((uint32_t *)&addr->v6,
				       sizeof(addr->v6), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
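
/*
 * Worked example (editor's addition) of the fixed-point accounting
 * above: for a rule with "max-src-conn-rate 100/10",
 * pf_init_threshold() stores limit = 100 * PF_THRESHOLD_MULT and
 * seconds = 10.  Each connection adds PF_THRESHOLD_MULT to count,
 * while elapsed time decays the old count linearly:
 *
 *	count -= count * diff / seconds;
 *
 * so 100 connections spread evenly across the window keep count
 * hovering near the limit, and pf_check_threshold() fires only once
 * the average rate exceeds 100 connections per 10 seconds.
 */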

static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}

static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
		    sk = s->key[PF_SK_WIRE];
		    SLIST_FOREACH(pfoe, &queue, next)
			if (sk->af == pfoe->af &&
			    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
			    pfoe->rule == s->rule.ptr) &&
			    ((pfoe->dir == PF_OUT &&
			    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
			    (pfoe->dir == PF_IN &&
			    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
				s->timeout = PFTM_PURGE;
				s->src.state = s->dst.state = TCPS_CLOSED;
				killed++;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed\n", __func__, killed);

	CURVNET_RESTORE();
}
637
638/*
639 * Can return locked on failure, so that we can consistently
640 * allocate and insert a new one.
641 */
642struct pf_src_node *
643pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
644	int returnlocked)
645{
646	struct pf_srchash *sh;
647	struct pf_src_node *n;
648
649	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
650
651	sh = &V_pf_srchash[pf_hashsrc(src, af)];
652	PF_HASHROW_LOCK(sh);
653	LIST_FOREACH(n, &sh->nodes, entry)
654		if (n->rule.ptr == rule && n->af == af &&
655		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
656		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
657			break;
658	if (n != NULL) {
659		n->states++;
660		PF_HASHROW_UNLOCK(sh);
661	} else if (returnlocked == 0)
662		PF_HASHROW_UNLOCK(sh);
663
664	return (n);
665}
666
667static int
668pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
669    struct pf_addr *src, sa_family_t af)
670{
671
672	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
673	    rule->rpool.opts & PF_POOL_STICKYADDR),
674	    ("%s for non-tracking rule %p", __func__, rule));
675
676	if (*sn == NULL)
677		*sn = pf_find_src_node(src, rule, af, 1);
678
679	if (*sn == NULL) {
680		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];
681
682		PF_HASHROW_ASSERT(sh);
683
684		if (!rule->max_src_nodes ||
685		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
686			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
687		else
688			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
689			    1);
690		if ((*sn) == NULL) {
691			PF_HASHROW_UNLOCK(sh);
692			return (-1);
693		}
694
695		pf_init_threshold(&(*sn)->conn_rate,
696		    rule->max_src_conn_rate.limit,
697		    rule->max_src_conn_rate.seconds);
698
699		(*sn)->af = af;
700		(*sn)->rule.ptr = rule;
701		PF_ACPY(&(*sn)->addr, src, af);
702		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
703		(*sn)->creation = time_uptime;
704		(*sn)->ruletype = rule->action;
705		(*sn)->states = 1;
706		if ((*sn)->rule.ptr != NULL)
707			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
708		PF_HASHROW_UNLOCK(sh);
709		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
710	} else {
711		if (rule->max_src_states &&
712		    (*sn)->states >= rule->max_src_states) {
713			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
714			    1);
715			return (-1);
716		}
717	}
718	return (0);
719}
720
721void
722pf_unlink_src_node(struct pf_src_node *src)
723{
724
725	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
726	LIST_REMOVE(src, entry);
727	if (src->rule.ptr)
728		counter_u64_add(src->rule.ptr->src_nodes, -1);
729}
730
731u_int
732pf_free_src_nodes(struct pf_src_node_list *head)
733{
734	struct pf_src_node *sn, *tmp;
735	u_int count = 0;
736
737	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
738		uma_zfree(V_pf_sources_z, sn);
739		count++;
740	}
741
742	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
743
744	return (count);
745}
746
747void
748pf_mtag_initialize()
749{
750
751	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
752	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
753	    UMA_ALIGN_PTR, 0);
754}
755
756/* Per-vnet data storage structures initialization. */
757void
758pf_initialize()
759{
760	struct pf_keyhash	*kh;
761	struct pf_idhash	*ih;
762	struct pf_srchash	*sh;
763	u_int i;
764
765	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
766	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
767		pf_hashsize = PF_HASHSIZ;
768	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize);
769	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
770		pf_srchashsize = PF_HASHSIZ / 4;
771
772	V_pf_hashseed = arc4random();
773
774	/* States and state keys storage. */
775	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
776	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
777	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
778	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
779	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
780
781	V_pf_state_key_z = uma_zcreate("pf state keys",
782	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
783	    UMA_ALIGN_PTR, 0);
784	V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
785	    M_PFHASH, M_WAITOK | M_ZERO);
786	V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
787	    M_PFHASH, M_WAITOK | M_ZERO);
788	pf_hashmask = pf_hashsize - 1;
789	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
790	    i++, kh++, ih++) {
791		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
792		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
793	}
794
795	/* Source nodes. */
796	V_pf_sources_z = uma_zcreate("pf source nodes",
797	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
798	    0);
799	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
800	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
801	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
802	V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
803	  M_PFHASH, M_WAITOK|M_ZERO);
804	pf_srchashmask = pf_srchashsize - 1;
805	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
806		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
807
808	/* ALTQ */
809	TAILQ_INIT(&V_pf_altqs[0]);
810	TAILQ_INIT(&V_pf_altqs[1]);
811	TAILQ_INIT(&V_pf_pabuf);
812	V_pf_altqs_active = &V_pf_altqs[0];
813	V_pf_altqs_inactive = &V_pf_altqs[1];
814
815
816	/* Send & overload+flush queues. */
817	STAILQ_INIT(&V_pf_sendqueue);
818	SLIST_INIT(&V_pf_overloadqueue);
819	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
820	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
821	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
822	    MTX_DEF);
823
824	/* Unlinked, but may be referenced rules. */
825	TAILQ_INIT(&V_pf_unlinked_rules);
826	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
827}

void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}

static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}

static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}
	/*
	 * Attach done. Decide whether (and how) we should
	 * attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
#undef	KEYS_UNLOCK
}

static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}

int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}
	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}

/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_INOUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}

void
pf_purge_thread(void *v)
{
	u_int idx = 0;

	CURVNET_SET((struct vnet *)v);

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			/*
			 * To clean up all kifs and rules we need
			 * two runs: the first one clears reference flags,
			 * then pf_purge_expired_states() doesn't
			 * raise them, and then the second run frees.
			 */
			PF_RULES_RUNLOCK();
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(0, pf_hashmask);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Announce success and exit.
			 */
			PF_RULES_RLOCK();
			V_pf_end_threads++;
			PF_RULES_RUNLOCK();
			wakeup(pf_purge_thread);
			kproc_exit(0);
		}
		PF_RULES_RUNLOCK();

		/* Process 1/interval fraction of the state table every run. */
		idx = pf_purge_expired_states(idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

		/* Purge other expired types every PFTM_INTERVAL seconds. */
		if (idx == 0) {
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();
			pfi_kif_purge();
		}
	}
	/* not reached */
	CURVNET_RESTORE();
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout >= PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
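
/*
 * Editor's worked example of the adaptive scaling above: with
 * "adaptive.start 6000, adaptive.end 12000", a base timeout of 60
 * seconds and 9000 states currently allocated, the state expires after
 *
 *	60 * (12000 - 9000) / (12000 - 6000) = 30 seconds,
 *
 * i.e. timeouts shrink linearly from the full value at "start" down to
 * zero (immediate expiry) at "end".
 */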

void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
	    PF_HASHROW_LOCK(sh);
	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
		if (cur->states == 0 && cur->expire <= time_uptime) {
			pf_unlink_src_node(cur);
			LIST_INSERT_HEAD(&freelist, cur, entry);
		} else if (cur->rule.ptr != NULL)
			cur->rule.ptr->rule_flag |= PFRULE_REFS;
	    PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_src_node *sn;
	struct pf_srchash *sh;
	uint32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	s->src_node = s->nat_src_node = NULL;
}

/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}

/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have an overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that the overloading task references
	 * an already unlinked rule.
	 */
1708	PF_OVERLOADQ_LOCK();
1709	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
1710		PF_OVERLOADQ_UNLOCK();
1711		return;
1712	}
1713	PF_OVERLOADQ_UNLOCK();
1714
1715	/*
1716	 * Do naive mark-and-sweep garbage collecting of old rules.
1717	 * Reference flag is raised by pf_purge_expired_states()
1718	 * and pf_purge_expired_src_nodes().
1719	 *
1720	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
1721	 * use a temporary queue.
1722	 */
1723	TAILQ_INIT(&tmpq);
1724	PF_UNLNKDRULES_LOCK();
1725	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
1726		if (!(r->rule_flag & PFRULE_REFS)) {
1727			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
1728			TAILQ_INSERT_TAIL(&tmpq, r, entries);
1729		} else
1730			r->rule_flag &= ~PFRULE_REFS;
1731	}
1732	PF_UNLNKDRULES_UNLOCK();
1733
1734	if (!TAILQ_EMPTY(&tmpq)) {
1735		PF_RULES_WLOCK();
1736		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
1737			TAILQ_REMOVE(&tmpq, r, entries);
1738			pf_free_rule(r);
1739		}
1740		PF_RULES_WUNLOCK();
1741	}
1742}
1743
1744void
1745pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1746{
1747	switch (af) {
1748#ifdef INET
1749	case AF_INET: {
1750		u_int32_t a = ntohl(addr->addr32[0]);
1751		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1752		    (a>>8)&255, a&255);
1753		if (p) {
1754			p = ntohs(p);
1755			printf(":%u", p);
1756		}
1757		break;
1758	}
1759#endif /* INET */
1760#ifdef INET6
1761	case AF_INET6: {
1762		u_int16_t b;
1763		u_int8_t i, curstart, curend, maxstart, maxend;
1764		curstart = curend = maxstart = maxend = 255;
1765		for (i = 0; i < 8; i++) {
1766			if (!addr->addr16[i]) {
1767				if (curstart == 255)
1768					curstart = i;
1769				curend = i;
1770			} else {
1771				if ((curend - curstart) >
1772				    (maxend - maxstart)) {
1773					maxstart = curstart;
1774					maxend = curend;
1775				}
1776				curstart = curend = 255;
1777			}
1778		}
1779		if ((curend - curstart) >
1780		    (maxend - maxstart)) {
1781			maxstart = curstart;
1782			maxend = curend;
1783		}
1784		for (i = 0; i < 8; i++) {
1785			if (i >= maxstart && i <= maxend) {
1786				if (i == 0)
1787					printf(":");
1788				if (i == maxend)
1789					printf(":");
1790			} else {
1791				b = ntohs(addr->addr16[i]);
1792				printf("%x", b);
1793				if (i < 7)
1794					printf(":");
1795			}
1796		}
1797		if (p) {
1798			p = ntohs(p);
1799			printf("[%u]", p);
1800		}
1801		break;
1802	}
1803#endif /* INET6 */
1804	}
1805}
1806
1807void
1808pf_print_state(struct pf_state *s)
1809{
1810	pf_print_state_parts(s, NULL, NULL);
1811}
1812
1813static void
1814pf_print_state_parts(struct pf_state *s,
1815    struct pf_state_key *skwp, struct pf_state_key *sksp)
1816{
1817	struct pf_state_key *skw, *sks;
1818	u_int8_t proto, dir;
1819
1820	/* Do our best to fill these, but they're skipped if NULL */
1821	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1822	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1823	proto = skw ? skw->proto : (sks ? sks->proto : 0);
1824	dir = s ? s->direction : 0;
1825
1826	switch (proto) {
1827	case IPPROTO_IPV4:
1828		printf("IPv4");
1829		break;
1830	case IPPROTO_IPV6:
1831		printf("IPv6");
1832		break;
1833	case IPPROTO_TCP:
1834		printf("TCP");
1835		break;
1836	case IPPROTO_UDP:
1837		printf("UDP");
1838		break;
1839	case IPPROTO_ICMP:
1840		printf("ICMP");
1841		break;
1842	case IPPROTO_ICMPV6:
1843		printf("ICMPv6");
1844		break;
1845	default:
1846		printf("%u", skw->proto);
1847		break;
1848	}
1849	switch (dir) {
1850	case PF_IN:
1851		printf(" in");
1852		break;
1853	case PF_OUT:
1854		printf(" out");
1855		break;
1856	}
1857	if (skw) {
1858		printf(" wire: ");
1859		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1860		printf(" ");
1861		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1862	}
1863	if (sks) {
1864		printf(" stack: ");
1865		if (sks != skw) {
1866			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1867			printf(" ");
1868			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1869		} else
1870			printf("-");
1871	}
1872	if (s) {
1873		if (proto == IPPROTO_TCP) {
1874			printf(" [lo=%u high=%u win=%u modulator=%u",
1875			    s->src.seqlo, s->src.seqhi,
1876			    s->src.max_win, s->src.seqdiff);
1877			if (s->src.wscale && s->dst.wscale)
1878				printf(" wscale=%u",
1879				    s->src.wscale & PF_WSCALE_MASK);
1880			printf("]");
1881			printf(" [lo=%u high=%u win=%u modulator=%u",
1882			    s->dst.seqlo, s->dst.seqhi,
1883			    s->dst.max_win, s->dst.seqdiff);
1884			if (s->src.wscale && s->dst.wscale)
1885				printf(" wscale=%u",
1886				s->dst.wscale & PF_WSCALE_MASK);
1887			printf("]");
1888		}
1889		printf(" %u:%u", s->src.state, s->dst.state);
1890	}
1891}
1892
1893void
1894pf_print_flags(u_int8_t f)
1895{
1896	if (f)
1897		printf(" ");
1898	if (f & TH_FIN)
1899		printf("F");
1900	if (f & TH_SYN)
1901		printf("S");
1902	if (f & TH_RST)
1903		printf("R");
1904	if (f & TH_PUSH)
1905		printf("P");
1906	if (f & TH_ACK)
1907		printf("A");
1908	if (f & TH_URG)
1909		printf("U");
1910	if (f & TH_ECE)
1911		printf("E");
1912	if (f & TH_CWR)
1913		printf("W");
1914}
1915
1916#define	PF_SET_SKIP_STEPS(i)					\
1917	do {							\
1918		while (head[i] != cur) {			\
1919			head[i]->skip[i].ptr = cur;		\
1920			head[i] = TAILQ_NEXT(head[i], entries);	\
1921		}						\
1922	} while (0)
1923
1924void
1925pf_calc_skip_steps(struct pf_rulequeue *rules)
1926{
1927	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1928	int i;
1929
1930	cur = TAILQ_FIRST(rules);
1931	prev = cur;
1932	for (i = 0; i < PF_SKIP_COUNT; ++i)
1933		head[i] = cur;
1934	while (cur != NULL) {
1935
1936		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1937			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1938		if (cur->direction != prev->direction)
1939			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1940		if (cur->af != prev->af)
1941			PF_SET_SKIP_STEPS(PF_SKIP_AF);
1942		if (cur->proto != prev->proto)
1943			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1944		if (cur->src.neg != prev->src.neg ||
1945		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1946			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1947		if (cur->src.port[0] != prev->src.port[0] ||
1948		    cur->src.port[1] != prev->src.port[1] ||
1949		    cur->src.port_op != prev->src.port_op)
1950			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1951		if (cur->dst.neg != prev->dst.neg ||
1952		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1953			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1954		if (cur->dst.port[0] != prev->dst.port[0] ||
1955		    cur->dst.port[1] != prev->dst.port[1] ||
1956		    cur->dst.port_op != prev->dst.port_op)
1957			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1958
1959		prev = cur;
1960		cur = TAILQ_NEXT(cur, entries);
1961	}
1962	for (i = 0; i < PF_SKIP_COUNT; ++i)
1963		PF_SET_SKIP_STEPS(i);
1964}
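/*
 * Illustrative sketch (not part of pf): the effect of the skip steps
 * computed above, reduced to a toy rule list with a single criterion
 * (the interface).  When a rule's interface test fails, evaluation
 * follows the precomputed skip pointer past the whole run of rules
 * sharing that interface instead of testing each of them in turn; the
 * real evaluator does this per criterion in pf_test_rule().  The type
 * and function names below are invented for the example.
 */
struct toy_rule {
	int		 ifidx;		/* interface this rule applies to */
	struct toy_rule	*skip_ifp;	/* next rule with a different ifidx */
	struct toy_rule	*next;		/* list successor */
};

static __unused struct toy_rule *
toy_eval(struct toy_rule *r, int pkt_ifidx)
{

	while (r != NULL) {
		if (r->ifidx != pkt_ifidx)
			r = r->skip_ifp;	/* leap over the whole run */
		else
			return (r);		/* first candidate match */
	}
	return (NULL);
}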
1965
1966static int
1967pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1968{
1969	if (aw1->type != aw2->type)
1970		return (1);
1971	switch (aw1->type) {
1972	case PF_ADDR_ADDRMASK:
1973	case PF_ADDR_RANGE:
1974		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1975			return (1);
1976		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1977			return (1);
1978		return (0);
1979	case PF_ADDR_DYNIFTL:
1980		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1981	case PF_ADDR_NOROUTE:
1982	case PF_ADDR_URPFFAILED:
1983		return (0);
1984	case PF_ADDR_TABLE:
1985		return (aw1->p.tbl != aw2->p.tbl);
1986	default:
1987		printf("invalid address type: %d\n", aw1->type);
1988		return (1);
1989	}
1990}
1991
1992/**
1993 * Checksum updates are a little complicated because the checksum in the TCP/UDP
1994 * header isn't always a full checksum.  In some cases (e.g. output with checksum
1995 * offload pending) it's a pseudo-header checksum: a partial checksum over only
1996 * the src/dst IP addresses, the protocol number and the length.
1997 *
1998 * That leaves the following cases:
1999 *  - Input or forwarding: there is no TSO, the checksum fields are full
2000 *	checksums, and we must update the checksum whenever we change anything.
2001 *  - Output (i.e. the checksum is a pseudo-header checksum):
2002 *	- The field being updated is the src/dst address, or it affects the
2003 *	  length of the packet: we must update the pseudo-header checksum (note
2004 *	  that this checksum is stored uncomplemented).
2005 *	- Some other field is being modified (e.g. the src/dst port numbers):
2006 *	  we don't have to update anything.
2007 **/
2008u_int16_t
2009pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2010{
2011	u_int32_t	l;
2012
2013	if (udp && !cksum)
2014		return (0x0000);
2015	l = cksum + old - new;
2016	l = (l >> 16) + (l & 65535);
2017	l = l & 65535;
2018	if (udp && !l)
2019		return (0xFFFF);
2020	return (l);
2021}
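/*
 * Illustrative sketch (not part of pf): how a caller uses pf_cksum_fixup()
 * to patch a full TCP checksum after rewriting a single 16-bit field,
 * instead of recomputing the checksum over the entire segment.  The
 * function name is invented for the example.
 */
static __unused void
example_rewrite_dport(struct tcphdr *th, u_int16_t new_dport)
{
	u_int16_t old_dport = th->th_dport;

	th->th_dport = new_dport;
	/* udp == 0: for TCP, a zero checksum needs no special casing. */
	th->th_sum = pf_cksum_fixup(th->th_sum, old_dport, new_dport, 0);
}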
2022
2023u_int16_t
2024pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2025        u_int16_t new, u_int8_t udp)
2026{
2027	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2028		return (cksum);
2029
2030	return (pf_cksum_fixup(cksum, old, new, udp));
2031}
2032
2033static void
2034pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2035        u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2036        sa_family_t af)
2037{
2038	struct pf_addr	ao;
2039	u_int16_t	po = *p;
2040
2041	PF_ACPY(&ao, a, af);
2042	PF_ACPY(a, an, af);
2043
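	/*
	 * If the checksum is offloaded (CSUM_DELAY_DATA*), *pc holds the
	 * uncomplemented pseudo-header sum rather than a finished ones'
	 * complement checksum.  Complement it here so the fixups below
	 * operate on a real checksum; the complement is undone at the
	 * bottom of this function.
	 */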
2044	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2045		*pc = ~*pc;
2046
2047	*p = pn;
2048
2049	switch (af) {
2050#ifdef INET
2051	case AF_INET:
2052		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2053		    ao.addr16[0], an->addr16[0], 0),
2054		    ao.addr16[1], an->addr16[1], 0);
2056
2057		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2058		    ao.addr16[0], an->addr16[0], u),
2059		    ao.addr16[1], an->addr16[1], u);
2060
2061		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2062		break;
2063#endif /* INET */
2064#ifdef INET6
2065	case AF_INET6:
2066		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2067		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2068		    pf_cksum_fixup(pf_cksum_fixup(*pc,
2069		    ao.addr16[0], an->addr16[0], u),
2070		    ao.addr16[1], an->addr16[1], u),
2071		    ao.addr16[2], an->addr16[2], u),
2072		    ao.addr16[3], an->addr16[3], u),
2073		    ao.addr16[4], an->addr16[4], u),
2074		    ao.addr16[5], an->addr16[5], u),
2075		    ao.addr16[6], an->addr16[6], u),
2076		    ao.addr16[7], an->addr16[7], u);
2077
2078		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2079		break;
2080#endif /* INET6 */
2081	}
2082
2083	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2084	    CSUM_DELAY_DATA_IPV6)) {
2085		*pc = ~*pc;
2086		if (! *pc)
2087			*pc = 0xffff;
2088	}
2089}
2090
2091/* Changes a u_int32_t.  Uses a void * so there are no alignment restrictions. */
2092void
2093pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2094{
2095	u_int32_t	ao;
2096
2097	memcpy(&ao, a, sizeof(ao));
2098	memcpy(a, &an, sizeof(u_int32_t));
2099	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2100	    ao % 65536, an % 65536, u);
2101}
2102
2103void
2104pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2105{
2106	u_int32_t	ao;
2107
2108	memcpy(&ao, a, sizeof(ao));
2109	memcpy(a, &an, sizeof(u_int32_t));
2110
2111	*c = pf_proto_cksum_fixup(m,
2112	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2113	    ao % 65536, an % 65536, udp);
2114}
2115
2116#ifdef INET6
2117static void
2118pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2119{
2120	struct pf_addr	ao;
2121
2122	PF_ACPY(&ao, a, AF_INET6);
2123	PF_ACPY(a, an, AF_INET6);
2124
2125	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2126	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2127	    pf_cksum_fixup(pf_cksum_fixup(*c,
2128	    ao.addr16[0], an->addr16[0], u),
2129	    ao.addr16[1], an->addr16[1], u),
2130	    ao.addr16[2], an->addr16[2], u),
2131	    ao.addr16[3], an->addr16[3], u),
2132	    ao.addr16[4], an->addr16[4], u),
2133	    ao.addr16[5], an->addr16[5], u),
2134	    ao.addr16[6], an->addr16[6], u),
2135	    ao.addr16[7], an->addr16[7], u);
2136}
2137#endif /* INET6 */
2138
2139static void
2140pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2141    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2142    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2143{
2144	struct pf_addr	oia, ooa;
2145
2146	PF_ACPY(&oia, ia, af);
2147	if (oa)
2148		PF_ACPY(&ooa, oa, af);
2149
2150	/* Change inner protocol port, fix inner protocol checksum. */
2151	if (ip != NULL) {
2152		u_int16_t	oip = *ip;
2153		u_int32_t	opc;
2154
2155		if (pc != NULL)
2156			opc = *pc;
2157		*ip = np;
2158		if (pc != NULL)
2159			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
2160		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2161		if (pc != NULL)
2162			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2163	}
2164	/* Change inner ip address, fix inner ip and icmp checksums. */
2165	PF_ACPY(ia, na, af);
2166	switch (af) {
2167#ifdef INET
2168	case AF_INET: {
2169		u_int32_t	 oh2c = *h2c;
2170
2171		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2172		    oia.addr16[0], ia->addr16[0], 0),
2173		    oia.addr16[1], ia->addr16[1], 0);
2174		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2175		    oia.addr16[0], ia->addr16[0], 0),
2176		    oia.addr16[1], ia->addr16[1], 0);
2177		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2178		break;
2179	}
2180#endif /* INET */
2181#ifdef INET6
2182	case AF_INET6:
2183		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2184		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2185		    pf_cksum_fixup(pf_cksum_fixup(*ic,
2186		    oia.addr16[0], ia->addr16[0], u),
2187		    oia.addr16[1], ia->addr16[1], u),
2188		    oia.addr16[2], ia->addr16[2], u),
2189		    oia.addr16[3], ia->addr16[3], u),
2190		    oia.addr16[4], ia->addr16[4], u),
2191		    oia.addr16[5], ia->addr16[5], u),
2192		    oia.addr16[6], ia->addr16[6], u),
2193		    oia.addr16[7], ia->addr16[7], u);
2194		break;
2195#endif /* INET6 */
2196	}
2197	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2198	if (oa) {
2199		PF_ACPY(oa, na, af);
2200		switch (af) {
2201#ifdef INET
2202		case AF_INET:
2203			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2204			    ooa.addr16[0], oa->addr16[0], 0),
2205			    ooa.addr16[1], oa->addr16[1], 0);
2206			break;
2207#endif /* INET */
2208#ifdef INET6
2209		case AF_INET6:
2210			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2211			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2212			    pf_cksum_fixup(pf_cksum_fixup(*ic,
2213			    ooa.addr16[0], oa->addr16[0], u),
2214			    ooa.addr16[1], oa->addr16[1], u),
2215			    ooa.addr16[2], oa->addr16[2], u),
2216			    ooa.addr16[3], oa->addr16[3], u),
2217			    ooa.addr16[4], oa->addr16[4], u),
2218			    ooa.addr16[5], oa->addr16[5], u),
2219			    ooa.addr16[6], oa->addr16[6], u),
2220			    ooa.addr16[7], oa->addr16[7], u);
2221			break;
2222#endif /* INET6 */
2223		}
2224	}
2225}
2226
2227
2228/*
2229 * Need to modulate the sequence numbers in the TCP SACK option
2230 * (credits to Krzysztof Pfaff for report and patch)
2231 */
2232static int
2233pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2234    struct tcphdr *th, struct pf_state_peer *dst)
2235{
2236	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2237	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2238	int copyback = 0, i, olen;
2239	struct sackblk sack;
2240
2241#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
2242	if (hlen < TCPOLEN_SACKLEN ||
2243	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2244		return (0);
2245
2246	while (hlen >= TCPOLEN_SACKLEN) {
2247		olen = opt[1];
2248		switch (*opt) {
2249		case TCPOPT_EOL:	/* FALLTHROUGH */
2250		case TCPOPT_NOP:
2251			opt++;
2252			hlen--;
2253			break;
2254		case TCPOPT_SACK:
2255			if (olen > hlen)
2256				olen = hlen;
2257			if (olen >= TCPOLEN_SACKLEN) {
2258				for (i = 2; i + TCPOLEN_SACK <= olen;
2259				    i += TCPOLEN_SACK) {
2260					memcpy(&sack, &opt[i], sizeof(sack));
2261					pf_change_proto_a(m, &sack.start, &th->th_sum,
2262					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
2263					pf_change_proto_a(m, &sack.end, &th->th_sum,
2264					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
2265					memcpy(&opt[i], &sack, sizeof(sack));
2266				}
2267				copyback = 1;
2268			}
2269			/* FALLTHROUGH */
2270		default:
2271			if (olen < 2)
2272				olen = 2;
2273			hlen -= olen;
2274			opt += olen;
2275		}
2276	}
2277
2278	if (copyback)
2279		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2280	return (copyback);
2281}
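/*
 * Illustrative sketch (not part of pf): the TCP option walk used by
 * pf_modulate_sack() above and by pf_get_wscale()/pf_get_mss() below.
 * EOL and NOP are single-byte options; every other option is a TLV whose
 * second byte holds the total option length, clamped to at least 2 so a
 * corrupt length byte cannot make the loop spin forever.  The function
 * name is invented for the example.
 */
static __unused void
example_walk_tcp_opts(u_int8_t *opt, int hlen)
{
	u_int8_t olen;

	while (hlen >= 2) {
		switch (*opt) {
		case TCPOPT_EOL:	/* end of option list */
			return;
		case TCPOPT_NOP:	/* single byte of padding */
			opt++;
			hlen--;
			break;
		default:		/* kind, len, data[len - 2] */
			olen = opt[1];
			if (olen < 2)
				olen = 2;
			if (olen > hlen)
				olen = hlen;
			/* opt[0] is the kind; opt + 2 points at the data. */
			opt += olen;
			hlen -= olen;
			break;
		}
	}
}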
2282
2283static void
2284pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2285    const struct pf_addr *saddr, const struct pf_addr *daddr,
2286    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2287    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2288    u_int16_t rtag, struct ifnet *ifp)
2289{
2290	struct pf_send_entry *pfse;
2291	struct mbuf	*m;
2292	int		 len, tlen;
2293#ifdef INET
2294	struct ip	*h = NULL;
2295#endif /* INET */
2296#ifdef INET6
2297	struct ip6_hdr	*h6 = NULL;
2298#endif /* INET6 */
2299	struct tcphdr	*th;
2300	char		*opt;
2301	struct pf_mtag  *pf_mtag;
2302
2303	len = 0;
2304	th = NULL;
2305
2306	/* maximum segment size tcp option */
2307	tlen = sizeof(struct tcphdr);
2308	if (mss)
2309		tlen += 4;
2310
2311	switch (af) {
2312#ifdef INET
2313	case AF_INET:
2314		len = sizeof(struct ip) + tlen;
2315		break;
2316#endif /* INET */
2317#ifdef INET6
2318	case AF_INET6:
2319		len = sizeof(struct ip6_hdr) + tlen;
2320		break;
2321#endif /* INET6 */
2322	default:
2323		panic("%s: unsupported af %d", __func__, af);
2324	}
2325
2326	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2327	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2328	if (pfse == NULL)
2329		return;
2330	m = m_gethdr(M_NOWAIT, MT_DATA);
2331	if (m == NULL) {
2332		free(pfse, M_PFTEMP);
2333		return;
2334	}
2335#ifdef MAC
2336	mac_netinet_firewall_send(m);
2337#endif
2338	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2339		free(pfse, M_PFTEMP);
2340		m_freem(m);
2341		return;
2342	}
2343	if (tag)
2344		m->m_flags |= M_SKIP_FIREWALL;
2345	pf_mtag->tag = rtag;
2346
2347	if (r != NULL && r->rtableid >= 0)
2348		M_SETFIB(m, r->rtableid);
2349
2350#ifdef ALTQ
2351	if (r != NULL && r->qid) {
2352		pf_mtag->qid = r->qid;
2353
2354		/* add hints for ecn */
2355		pf_mtag->hdr = mtod(m, struct ip *);
2356	}
2357#endif /* ALTQ */
2358	m->m_data += max_linkhdr;
2359	m->m_pkthdr.len = m->m_len = len;
2360	m->m_pkthdr.rcvif = NULL;
2361	bzero(m->m_data, len);
2362	switch (af) {
2363#ifdef INET
2364	case AF_INET:
2365		h = mtod(m, struct ip *);
2366
2367		/* IP header fields included in the TCP checksum */
2368		h->ip_p = IPPROTO_TCP;
2369		h->ip_len = htons(tlen);
2370		h->ip_src.s_addr = saddr->v4.s_addr;
2371		h->ip_dst.s_addr = daddr->v4.s_addr;
2372
2373		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2374		break;
2375#endif /* INET */
2376#ifdef INET6
2377	case AF_INET6:
2378		h6 = mtod(m, struct ip6_hdr *);
2379
2380		/* IP header fields included in the TCP checksum */
2381		h6->ip6_nxt = IPPROTO_TCP;
2382		h6->ip6_plen = htons(tlen);
2383		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2384		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2385
2386		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2387		break;
2388#endif /* INET6 */
2389	}
2390
2391	/* TCP header */
2392	th->th_sport = sport;
2393	th->th_dport = dport;
2394	th->th_seq = htonl(seq);
2395	th->th_ack = htonl(ack);
2396	th->th_off = tlen >> 2;
2397	th->th_flags = flags;
2398	th->th_win = htons(win);
2399
2400	if (mss) {
2401		opt = (char *)(th + 1);
2402		opt[0] = TCPOPT_MAXSEG;
2403		opt[1] = 4;
2404		HTONS(mss);
2405		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2406	}
2407
2408	switch (af) {
2409#ifdef INET
2410	case AF_INET:
2411		/* TCP checksum */
2412		th->th_sum = in_cksum(m, len);
2413
2414		/* Finish the IP header */
2415		h->ip_v = 4;
2416		h->ip_hl = sizeof(*h) >> 2;
2417		h->ip_tos = IPTOS_LOWDELAY;
2418		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2419		h->ip_len = htons(len);
2420		h->ip_ttl = ttl ? ttl : V_ip_defttl;
2421		h->ip_sum = 0;
2422
2423		pfse->pfse_type = PFSE_IP;
2424		break;
2425#endif /* INET */
2426#ifdef INET6
2427	case AF_INET6:
2428		/* TCP checksum */
2429		th->th_sum = in6_cksum(m, IPPROTO_TCP,
2430		    sizeof(struct ip6_hdr), tlen);
2431
2432		h6->ip6_vfc |= IPV6_VERSION;
2433		h6->ip6_hlim = IPV6_DEFHLIM;
2434
2435		pfse->pfse_type = PFSE_IP6;
2436		break;
2437#endif /* INET6 */
2438	}
2439	pfse->pfse_m = m;
2440	pf_send(pfse);
2441}
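/*
 * Note: pf_send_tcp() backs both reply paths in this file.  pf_test_rule()
 * calls it with TH_RST|TH_ACK to answer connections rejected by
 * "block return" rules, and pf_create_state() calls it with TH_SYN|TH_ACK
 * to complete the client side of the handshake when "synproxy state" is
 * in effect.
 */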
2442
2443static void
2444pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2445    struct pf_rule *r)
2446{
2447	struct pf_send_entry *pfse;
2448	struct mbuf *m0;
2449	struct pf_mtag *pf_mtag;
2450
2451	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2452	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2453	if (pfse == NULL)
2454		return;
2455
2456	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2457		free(pfse, M_PFTEMP);
2458		return;
2459	}
2460
2461	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2462		free(pfse, M_PFTEMP);
		m_freem(m0);	/* don't leak the copied packet */
2463		return;
2464	}
2465	/* XXX: revisit */
2466	m0->m_flags |= M_SKIP_FIREWALL;
2467
2468	if (r->rtableid >= 0)
2469		M_SETFIB(m0, r->rtableid);
2470
2471#ifdef ALTQ
2472	if (r->qid) {
2473		pf_mtag->qid = r->qid;
2474		/* add hints for ecn */
2475		pf_mtag->hdr = mtod(m0, struct ip *);
2476	}
2477#endif /* ALTQ */
2478
2479	switch (af) {
2480#ifdef INET
2481	case AF_INET:
2482		pfse->pfse_type = PFSE_ICMP;
2483		break;
2484#endif /* INET */
2485#ifdef INET6
2486	case AF_INET6:
2487		pfse->pfse_type = PFSE_ICMP6;
2488		break;
2489#endif /* INET6 */
2490	}
2491	pfse->pfse_m = m0;
2492	pfse->pfse_icmp_type = type;
2493	pfse->pfse_icmp_code = code;
2494	pf_send(pfse);
2495}
2496
2497/*
2498 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2499 * If n is 0, they match if they are equal.  If n is nonzero, they match if
2500 * they are different.
2501 */
2502int
2503pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2504    struct pf_addr *b, sa_family_t af)
2505{
2506	int	match = 0;
2507
2508	switch (af) {
2509#ifdef INET
2510	case AF_INET:
2511		if ((a->addr32[0] & m->addr32[0]) ==
2512		    (b->addr32[0] & m->addr32[0]))
2513			match++;
2514		break;
2515#endif /* INET */
2516#ifdef INET6
2517	case AF_INET6:
2518		if (((a->addr32[0] & m->addr32[0]) ==
2519		     (b->addr32[0] & m->addr32[0])) &&
2520		    ((a->addr32[1] & m->addr32[1]) ==
2521		     (b->addr32[1] & m->addr32[1])) &&
2522		    ((a->addr32[2] & m->addr32[2]) ==
2523		     (b->addr32[2] & m->addr32[2])) &&
2524		    ((a->addr32[3] & m->addr32[3]) ==
2525		     (b->addr32[3] & m->addr32[3])))
2526			match++;
2527		break;
2528#endif /* INET6 */
2529	}
2530	if (match) {
2531		if (n)
2532			return (0);
2533		else
2534			return (1);
2535	} else {
2536		if (n)
2537			return (1);
2538		else
2539			return (0);
2540	}
2541}
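/*
 * Worked example (illustrative): with n = 0, a = 10.0.0.0,
 * m = 255.255.255.0 and b = 10.0.0.5, the masked words are equal, so the
 * function returns 1 ("10.0.0.5 is within 10.0.0.0/24").  With n = 1 the
 * same inputs return 0, which implements the "! 10.0.0.0/24" negation.
 */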
2542
2543/*
2544 * Return 1 if b <= a <= e, otherwise return 0.
2545 */
2546int
2547pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2548    struct pf_addr *a, sa_family_t af)
2549{
2550	switch (af) {
2551#ifdef INET
2552	case AF_INET:
2553		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2554		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2555			return (0);
2556		break;
2557#endif /* INET */
2558#ifdef INET6
2559	case AF_INET6: {
2560		int	i;
2561
2562		/* check a >= b, comparing in host byte order */
2563		for (i = 0; i < 4; ++i)
2564			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2565				break;
2566			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2567				return (0);
2568		/* check a <= e */
2569		for (i = 0; i < 4; ++i)
2570			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2571				break;
2572			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2573				return (0);
2574		break;
2575	}
2576#endif /* INET6 */
2577	}
2578	return (1);
2579}
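/*
 * Worked example (illustrative): for a rule "from 10.0.0.10 - 10.0.0.20",
 * b = 10.0.0.10 and e = 10.0.0.20, so a = 10.0.0.15 matches and
 * a = 10.0.0.21 does not.  The ntohl() conversions matter: the words are
 * stored in network byte order and ordering is only meaningful in host
 * byte order.  For IPv6 the four words are compared most significant
 * first, i.e. the two 128-bit values are compared lexicographically.
 */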
2580
2581static int
2582pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2583{
2584	switch (op) {
2585	case PF_OP_IRG:
2586		return ((p > a1) && (p < a2));
2587	case PF_OP_XRG:
2588		return ((p < a1) || (p > a2));
2589	case PF_OP_RRG:
2590		return ((p >= a1) && (p <= a2));
2591	case PF_OP_EQ:
2592		return (p == a1);
2593	case PF_OP_NE:
2594		return (p != a1);
2595	case PF_OP_LT:
2596		return (p < a1);
2597	case PF_OP_LE:
2598		return (p <= a1);
2599	case PF_OP_GT:
2600		return (p > a1);
2601	case PF_OP_GE:
2602		return (p >= a1);
2603	}
2604	return (0); /* never reached */
2605}
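/*
 * Worked example (illustrative): the pf.conf expression "port 1000 >< 2000"
 * loads as op = PF_OP_IRG with a1 = 1000, a2 = 2000 and matches 1001-1999
 * (boundaries excluded); "port 1000:2000" loads as PF_OP_RRG and includes
 * both endpoints; "port != 80" loads as PF_OP_NE with a1 = 80.
 */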
2606
2607int
2608pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2609{
2610	NTOHS(a1);
2611	NTOHS(a2);
2612	NTOHS(p);
2613	return (pf_match(op, a1, a2, p));
2614}
2615
2616static int
2617pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2618{
2619	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2620		return (0);
2621	return (pf_match(op, a1, a2, u));
2622}
2623
2624static int
2625pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2626{
2627	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2628		return (0);
2629	return (pf_match(op, a1, a2, g));
2630}
2631
2632int
2633pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2634{
2635	if (*tag == -1)
2636		*tag = mtag;
2637
2638	return ((!r->match_tag_not && r->match_tag == *tag) ||
2639	    (r->match_tag_not && r->match_tag != *tag));
2640}
2641
2642int
2643pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2644{
2645
2646	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2647
2648	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2649		return (ENOMEM);
2650
2651	pd->pf_mtag->tag = tag;
2652
2653	return (0);
2654}
2655
2656#define	PF_ANCHOR_STACKSIZE	32
2657struct pf_anchor_stackframe {
2658	struct pf_ruleset	*rs;
2659	struct pf_rule		*r;	/* XXX: + match bit */
2660	struct pf_anchor	*child;
2661};
2662
2663/*
2664 * XXX: We rely on malloc(9) returning pointer aligned addresses.
2665 */
2666#define	PF_ANCHORSTACK_MATCH	0x00000001
2667#define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)
2668
2669#define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2670#define	PF_ANCHOR_RULE(f)	(struct pf_rule *)			\
2671				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2672#define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 			\
2673				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
2674} while (0)
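/*
 * Illustrative sketch (not part of pf): how one bit of state hides in the
 * low bit of a rule pointer.  malloc(9) returns at least pointer-aligned
 * memory, so bit 0 of a struct pf_rule * is always clear and can carry
 * the "match" flag; it must be masked off before the pointer is used.
 * The helper name is invented for the example.
 */
static __unused struct pf_rule *
example_untag_rule(void *tagged, int *matchedp)
{

	*matchedp = ((uintptr_t)tagged & PF_ANCHORSTACK_MATCH) != 0;
	return ((struct pf_rule *)((uintptr_t)tagged & ~PF_ANCHORSTACK_MASK));
}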
2675
2676void
2677pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2678    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2679    int *match)
2680{
2681	struct pf_anchor_stackframe	*f;
2682
2683	PF_RULES_RASSERT();
2684
2685	if (match)
2686		*match = 0;
2687	if (*depth >= PF_ANCHOR_STACKSIZE) {
2688		printf("%s: anchor stack overflow on %s\n",
2689		    __func__, (*r)->anchor->name);
2690		*r = TAILQ_NEXT(*r, entries);
2691		return;
2692	} else if (*depth == 0 && a != NULL)
2693		*a = *r;
2694	f = stack + (*depth)++;
2695	f->rs = *rs;
2696	f->r = *r;
2697	if ((*r)->anchor_wildcard) {
2698		struct pf_anchor_node *parent = &(*r)->anchor->children;
2699
2700		if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2701			*r = NULL;
2702			return;
2703		}
2704		*rs = &f->child->ruleset;
2705	} else {
2706		f->child = NULL;
2707		*rs = &(*r)->anchor->ruleset;
2708	}
2709	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2710}
2711
2712int
2713pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2714    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2715    int *match)
2716{
2717	struct pf_anchor_stackframe	*f;
2718	struct pf_rule *fr;
2719	int quick = 0;
2720
2721	PF_RULES_RASSERT();
2722
2723	do {
2724		if (*depth <= 0)
2725			break;
2726		f = stack + *depth - 1;
2727		fr = PF_ANCHOR_RULE(f);
2728		if (f->child != NULL) {
2729			struct pf_anchor_node *parent;
2730
2731			/*
2732			 * This block steps through the children
2733			 * of a wildcard anchor.
2734			 */
2735			parent = &fr->anchor->children;
2736			if (match != NULL && *match) {
2737				/*
2738				 * If any of "*" matched, then
2739				 * If any rule within "*" matched, then
2740				 * "foo/ *" as a whole matched; mark
2741				 * the frame accordingly.
2742				PF_ANCHOR_SET_MATCH(f);
2743				*match = 0;
2744			}
2745			f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2746			if (f->child != NULL) {
2747				*rs = &f->child->ruleset;
2748				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2749				if (*r == NULL)
2750					continue;
2751				else
2752					break;
2753			}
2754		}
2755		(*depth)--;
2756		if (*depth == 0 && a != NULL)
2757			*a = NULL;
2758		*rs = f->rs;
2759		if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2760			quick = fr->quick;
2761		*r = TAILQ_NEXT(fr, entries);
2762	} while (*r == NULL);
2763
2764	return (quick);
2765}
2766
2767#ifdef INET6
2768void
2769pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2770    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2771{
2772	switch (af) {
2773#ifdef INET
2774	case AF_INET:
2775		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2776		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2777		break;
2778#endif /* INET */
2779	case AF_INET6:
2780		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2781		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2782		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2783		((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2784		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2785		((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2786		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2787		((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2788		break;
2789	}
2790}
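/*
 * Worked example (illustrative): NAT into the pool 192.0.2.0/24 with
 * raddr = 192.0.2.0, rmask = 255.255.255.0 and saddr = 10.1.2.3 yields
 * (192.0.2.0 & rmask) | (10.1.2.3 & ~rmask) = 192.0.2.3: the network bits
 * come from the pool address, the host bits survive from the original
 * source (the "bitmask" pool option).
 */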
2791
2792void
2793pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2794{
2795	switch (af) {
2796#ifdef INET
2797	case AF_INET:
2798		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2799		break;
2800#endif /* INET */
2801	case AF_INET6:
2802		if (addr->addr32[3] == 0xffffffff) {
2803			addr->addr32[3] = 0;
2804			if (addr->addr32[2] == 0xffffffff) {
2805				addr->addr32[2] = 0;
2806				if (addr->addr32[1] == 0xffffffff) {
2807					addr->addr32[1] = 0;
2808					addr->addr32[0] =
2809					    htonl(ntohl(addr->addr32[0]) + 1);
2810				} else
2811					addr->addr32[1] =
2812					    htonl(ntohl(addr->addr32[1]) + 1);
2813			} else
2814				addr->addr32[2] =
2815				    htonl(ntohl(addr->addr32[2]) + 1);
2816		} else
2817			addr->addr32[3] =
2818			    htonl(ntohl(addr->addr32[3]) + 1);
2819		break;
2820	}
2821}
2822#endif /* INET6 */
2823
2824int
2825pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2826{
2827	struct pf_addr		*saddr, *daddr;
2828	u_int16_t		 sport, dport;
2829	struct inpcbinfo	*pi;
2830	struct inpcb		*inp;
2831
2832	pd->lookup.uid = UID_MAX;
2833	pd->lookup.gid = GID_MAX;
2834
2835	switch (pd->proto) {
2836	case IPPROTO_TCP:
2837		if (pd->hdr.tcp == NULL)
2838			return (-1);
2839		sport = pd->hdr.tcp->th_sport;
2840		dport = pd->hdr.tcp->th_dport;
2841		pi = &V_tcbinfo;
2842		break;
2843	case IPPROTO_UDP:
2844		if (pd->hdr.udp == NULL)
2845			return (-1);
2846		sport = pd->hdr.udp->uh_sport;
2847		dport = pd->hdr.udp->uh_dport;
2848		pi = &V_udbinfo;
2849		break;
2850	default:
2851		return (-1);
2852	}
2853	if (direction == PF_IN) {
2854		saddr = pd->src;
2855		daddr = pd->dst;
2856	} else {
2857		u_int16_t	p;
2858
2859		p = sport;
2860		sport = dport;
2861		dport = p;
2862		saddr = pd->dst;
2863		daddr = pd->src;
2864	}
2865	switch (pd->af) {
2866#ifdef INET
2867	case AF_INET:
2868		inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2869		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2870		if (inp == NULL) {
2871			inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2872			   daddr->v4, dport, INPLOOKUP_WILDCARD |
2873			   INPLOOKUP_RLOCKPCB, NULL, m);
2874			if (inp == NULL)
2875				return (-1);
2876		}
2877		break;
2878#endif /* INET */
2879#ifdef INET6
2880	case AF_INET6:
2881		inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2882		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2883		if (inp == NULL) {
2884			inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2885			    &daddr->v6, dport, INPLOOKUP_WILDCARD |
2886			    INPLOOKUP_RLOCKPCB, NULL, m);
2887			if (inp == NULL)
2888				return (-1);
2889		}
2890		break;
2891#endif /* INET6 */
2892
2893	default:
2894		return (-1);
2895	}
2896	INP_RLOCK_ASSERT(inp);
2897	pd->lookup.uid = inp->inp_cred->cr_uid;
2898	pd->lookup.gid = inp->inp_cred->cr_groups[0];
2899	INP_RUNLOCK(inp);
2900
2901	return (1);
2902}
2903
2904static u_int8_t
2905pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2906{
2907	int		 hlen;
2908	u_int8_t	 hdr[60];
2909	u_int8_t	*opt, optlen;
2910	u_int8_t	 wscale = 0;
2911
2912	hlen = th_off << 2;		/* hlen <= sizeof(hdr) */
2913	if (hlen <= sizeof(struct tcphdr))
2914		return (0);
2915	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2916		return (0);
2917	opt = hdr + sizeof(struct tcphdr);
2918	hlen -= sizeof(struct tcphdr);
2919	while (hlen >= 3) {
2920		switch (*opt) {
2921		case TCPOPT_EOL:
2922		case TCPOPT_NOP:
2923			++opt;
2924			--hlen;
2925			break;
2926		case TCPOPT_WINDOW:
2927			wscale = opt[2];
2928			if (wscale > TCP_MAX_WINSHIFT)
2929				wscale = TCP_MAX_WINSHIFT;
2930			wscale |= PF_WSCALE_FLAG;
2931			/* FALLTHROUGH */
2932		default:
2933			optlen = opt[1];
2934			if (optlen < 2)
2935				optlen = 2;
2936			hlen -= optlen;
2937			opt += optlen;
2938			break;
2939		}
2940	}
2941	return (wscale);
2942}
2943
2944static u_int16_t
2945pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2946{
2947	int		 hlen;
2948	u_int8_t	 hdr[60];
2949	u_int8_t	*opt, optlen;
2950	u_int16_t	 mss = V_tcp_mssdflt;
2951
2952	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
2953	if (hlen <= sizeof(struct tcphdr))
2954		return (0);
2955	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2956		return (0);
2957	opt = hdr + sizeof(struct tcphdr);
2958	hlen -= sizeof(struct tcphdr);
2959	while (hlen >= TCPOLEN_MAXSEG) {
2960		switch (*opt) {
2961		case TCPOPT_EOL:
2962		case TCPOPT_NOP:
2963			++opt;
2964			--hlen;
2965			break;
2966		case TCPOPT_MAXSEG:
2967			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2968			NTOHS(mss);
2969			/* FALLTHROUGH */
2970		default:
2971			optlen = opt[1];
2972			if (optlen < 2)
2973				optlen = 2;
2974			hlen -= optlen;
2975			opt += optlen;
2976			break;
2977		}
2978	}
2979	return (mss);
2980}
2981
2982static u_int16_t
2983pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2984{
2985#ifdef INET
2986	struct sockaddr_in	*dst;
2987	struct route		 ro;
2988#endif /* INET */
2989#ifdef INET6
2990	struct sockaddr_in6	*dst6;
2991	struct route_in6	 ro6;
2992#endif /* INET6 */
2993	struct rtentry		*rt = NULL;
2994	int			 hlen = 0;
2995	u_int16_t		 mss = V_tcp_mssdflt;
2996
2997	switch (af) {
2998#ifdef INET
2999	case AF_INET:
3000		hlen = sizeof(struct ip);
3001		bzero(&ro, sizeof(ro));
3002		dst = (struct sockaddr_in *)&ro.ro_dst;
3003		dst->sin_family = AF_INET;
3004		dst->sin_len = sizeof(*dst);
3005		dst->sin_addr = addr->v4;
3006		in_rtalloc_ign(&ro, 0, rtableid);
3007		rt = ro.ro_rt;
3008		break;
3009#endif /* INET */
3010#ifdef INET6
3011	case AF_INET6:
3012		hlen = sizeof(struct ip6_hdr);
3013		bzero(&ro6, sizeof(ro6));
3014		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3015		dst6->sin6_family = AF_INET6;
3016		dst6->sin6_len = sizeof(*dst6);
3017		dst6->sin6_addr = addr->v6;
3018		in6_rtalloc_ign(&ro6, 0, rtableid);
3019		rt = ro6.ro_rt;
3020		break;
3021#endif /* INET6 */
3022	}
3023
3024	if (rt && rt->rt_ifp) {
3025		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3026		mss = max(V_tcp_mssdflt, mss);
3027		RTFREE(rt);
3028	}
3029	mss = min(mss, offer);
3030	mss = max(mss, 64);		/* sanity - at least max opt space */
3031	return (mss);
3032}
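/*
 * Worked example (illustrative): for a route out an interface with a
 * 1500-byte MTU this computes 1500 - 20 - 20 = 1460 for IPv4, or
 * 1500 - 40 - 20 = 1440 for IPv6, then takes the smaller of that and the
 * peer's offer, never clamping below 64.
 */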
3033
3034static u_int32_t
3035pf_tcp_iss(struct pf_pdesc *pd)
3036{
3037	MD5_CTX ctx;
3038	u_int32_t digest[4];
3039
3040	if (V_pf_tcp_secret_init == 0) {
3041		read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3042		MD5Init(&V_pf_tcp_secret_ctx);
3043		MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3044		    sizeof(V_pf_tcp_secret));
3045		V_pf_tcp_secret_init = 1;
3046	}
3047
3048	ctx = V_pf_tcp_secret_ctx;
3049
3050	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3051	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3052	if (pd->af == AF_INET6) {
3053		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3054		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3055	} else {
3056		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3057		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3058	}
3059	MD5Final((u_char *)digest, &ctx);
3060	V_pf_tcp_iss_off += 4096;
3061#define	ISN_RANDOM_INCREMENT (4096 - 1)
3062	return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3063	    V_pf_tcp_iss_off);
3064#undef	ISN_RANDOM_INCREMENT
3065}
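/*
 * The scheme above is in the spirit of RFC 6528: hashing a boot-time
 * secret with the connection 4-tuple gives every connection its own ISS
 * space, while the offset (advanced by 4096 per call and fuzzed with up
 * to 4095 random units) keeps successive ISSs increasing yet hard to
 * predict.
 */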
3066
3067static int
3068pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3069    struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3070    struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3071{
3072	struct pf_rule		*nr = NULL;
3073	struct pf_addr		* const saddr = pd->src;
3074	struct pf_addr		* const daddr = pd->dst;
3075	sa_family_t		 af = pd->af;
3076	struct pf_rule		*r, *a = NULL;
3077	struct pf_ruleset	*ruleset = NULL;
3078	struct pf_src_node	*nsn = NULL;
3079	struct tcphdr		*th = pd->hdr.tcp;
3080	struct pf_state_key	*sk = NULL, *nk = NULL;
3081	u_short			 reason;
3082	int			 rewrite = 0, hdrlen = 0;
3083	int			 tag = -1, rtableid = -1;
3084	int			 asd = 0;
3085	int			 match = 0;
3086	int			 state_icmp = 0;
3087	u_int16_t		 sport = 0, dport = 0;
3088	u_int16_t		 bproto_sum = 0, bip_sum = 0;
3089	u_int8_t		 icmptype = 0, icmpcode = 0;
3090	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
3091
3092	PF_RULES_RASSERT();
3093
3094	if (inp != NULL) {
3095		INP_LOCK_ASSERT(inp);
3096		pd->lookup.uid = inp->inp_cred->cr_uid;
3097		pd->lookup.gid = inp->inp_cred->cr_groups[0];
3098		pd->lookup.done = 1;
3099	}
3100
3101	switch (pd->proto) {
3102	case IPPROTO_TCP:
3103		sport = th->th_sport;
3104		dport = th->th_dport;
3105		hdrlen = sizeof(*th);
3106		break;
3107	case IPPROTO_UDP:
3108		sport = pd->hdr.udp->uh_sport;
3109		dport = pd->hdr.udp->uh_dport;
3110		hdrlen = sizeof(*pd->hdr.udp);
3111		break;
3112#ifdef INET
3113	case IPPROTO_ICMP:
3114		if (pd->af != AF_INET)
3115			break;
3116		sport = dport = pd->hdr.icmp->icmp_id;
3117		hdrlen = sizeof(*pd->hdr.icmp);
3118		icmptype = pd->hdr.icmp->icmp_type;
3119		icmpcode = pd->hdr.icmp->icmp_code;
3120
3121		if (icmptype == ICMP_UNREACH ||
3122		    icmptype == ICMP_SOURCEQUENCH ||
3123		    icmptype == ICMP_REDIRECT ||
3124		    icmptype == ICMP_TIMXCEED ||
3125		    icmptype == ICMP_PARAMPROB)
3126			state_icmp++;
3127		break;
3128#endif /* INET */
3129#ifdef INET6
3130	case IPPROTO_ICMPV6:
3131		if (af != AF_INET6)
3132			break;
3133		sport = dport = pd->hdr.icmp6->icmp6_id;
3134		hdrlen = sizeof(*pd->hdr.icmp6);
3135		icmptype = pd->hdr.icmp6->icmp6_type;
3136		icmpcode = pd->hdr.icmp6->icmp6_code;
3137
3138		if (icmptype == ICMP6_DST_UNREACH ||
3139		    icmptype == ICMP6_PACKET_TOO_BIG ||
3140		    icmptype == ICMP6_TIME_EXCEEDED ||
3141		    icmptype == ICMP6_PARAM_PROB)
3142			state_icmp++;
3143		break;
3144#endif /* INET6 */
3145	default:
3146		sport = dport = hdrlen = 0;
3147		break;
3148	}
3149
3150	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3151
3152	/* check packet for BINAT/NAT/RDR */
3153	if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3154	    &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3155		KASSERT(sk != NULL, ("%s: null sk", __func__));
3156		KASSERT(nk != NULL, ("%s: null nk", __func__));
3157
3158		if (pd->ip_sum)
3159			bip_sum = *pd->ip_sum;
3160
3161		switch (pd->proto) {
3162		case IPPROTO_TCP:
3163			bproto_sum = th->th_sum;
3164			pd->proto_sum = &th->th_sum;
3165
3166			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3167			    nk->port[pd->sidx] != sport) {
3168				pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3169				    &th->th_sum, &nk->addr[pd->sidx],
3170				    nk->port[pd->sidx], 0, af);
3171				pd->sport = &th->th_sport;
3172				sport = th->th_sport;
3173			}
3174
3175			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3176			    nk->port[pd->didx] != dport) {
3177				pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3178				    &th->th_sum, &nk->addr[pd->didx],
3179				    nk->port[pd->didx], 0, af);
3180				dport = th->th_dport;
3181				pd->dport = &th->th_dport;
3182			}
3183			rewrite++;
3184			break;
3185		case IPPROTO_UDP:
3186			bproto_sum = pd->hdr.udp->uh_sum;
3187			pd->proto_sum = &pd->hdr.udp->uh_sum;
3188
3189			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3190			    nk->port[pd->sidx] != sport) {
3191				pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3192				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3193				    &nk->addr[pd->sidx],
3194				    nk->port[pd->sidx], 1, af);
3195				sport = pd->hdr.udp->uh_sport;
3196				pd->sport = &pd->hdr.udp->uh_sport;
3197			}
3198
3199			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3200			    nk->port[pd->didx] != dport) {
3201				pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3202				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3203				    &nk->addr[pd->didx],
3204				    nk->port[pd->didx], 1, af);
3205				dport = pd->hdr.udp->uh_dport;
3206				pd->dport = &pd->hdr.udp->uh_dport;
3207			}
3208			rewrite++;
3209			break;
3210#ifdef INET
3211		case IPPROTO_ICMP:
3212			nk->port[0] = nk->port[1];
3213			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3214				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3215				    nk->addr[pd->sidx].v4.s_addr, 0);
3216
3217			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3218				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3219				    nk->addr[pd->didx].v4.s_addr, 0);
3220
3221			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3222				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3223				    pd->hdr.icmp->icmp_cksum, sport,
3224				    nk->port[1], 0);
3225				pd->hdr.icmp->icmp_id = nk->port[1];
3226				pd->sport = &pd->hdr.icmp->icmp_id;
3227			}
3228			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3229			break;
3230#endif /* INET */
3231#ifdef INET6
3232		case IPPROTO_ICMPV6:
3233			nk->port[0] = nk->port[1];
3234			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3235				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3236				    &nk->addr[pd->sidx], 0);
3237
3238			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3239				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3240				    &nk->addr[pd->didx], 0);
3241			rewrite++;
3242			break;
3243#endif /* INET6 */
3244		default:
3245			switch (af) {
3246#ifdef INET
3247			case AF_INET:
3248				if (PF_ANEQ(saddr,
3249				    &nk->addr[pd->sidx], AF_INET))
3250					pf_change_a(&saddr->v4.s_addr,
3251					    pd->ip_sum,
3252					    nk->addr[pd->sidx].v4.s_addr, 0);
3253
3254				if (PF_ANEQ(daddr,
3255				    &nk->addr[pd->didx], AF_INET))
3256					pf_change_a(&daddr->v4.s_addr,
3257					    pd->ip_sum,
3258					    nk->addr[pd->didx].v4.s_addr, 0);
3259				break;
3260#endif /* INET */
3261#ifdef INET6
3262			case AF_INET6:
3263				if (PF_ANEQ(saddr,
3264				    &nk->addr[pd->sidx], AF_INET6))
3265					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3266
3267				if (PF_ANEQ(daddr,
3268				    &nk->addr[pd->didx], AF_INET6))
3269					PF_ACPY(daddr, &nk->addr[pd->didx], af);
3270				break;
3271#endif /* INET6 */
3272			}
3273			break;
3274		}
3275		if (nr->natpass)
3276			r = NULL;
3277		pd->nat_rule = nr;
3278	}
3279
3280	while (r != NULL) {
3281		r->evaluations++;
3282		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3283			r = r->skip[PF_SKIP_IFP].ptr;
3284		else if (r->direction && r->direction != direction)
3285			r = r->skip[PF_SKIP_DIR].ptr;
3286		else if (r->af && r->af != af)
3287			r = r->skip[PF_SKIP_AF].ptr;
3288		else if (r->proto && r->proto != pd->proto)
3289			r = r->skip[PF_SKIP_PROTO].ptr;
3290		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3291		    r->src.neg, kif, M_GETFIB(m)))
3292			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3293		/* tcp/udp only. port_op always 0 in other cases */
3294		else if (r->src.port_op && !pf_match_port(r->src.port_op,
3295		    r->src.port[0], r->src.port[1], sport))
3296			r = r->skip[PF_SKIP_SRC_PORT].ptr;
3297		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3298		    r->dst.neg, NULL, M_GETFIB(m)))
3299			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3300		/* tcp/udp only. port_op always 0 in other cases */
3301		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3302		    r->dst.port[0], r->dst.port[1], dport))
3303			r = r->skip[PF_SKIP_DST_PORT].ptr;
3304		/* icmp only. type always 0 in other cases */
3305		else if (r->type && r->type != icmptype + 1)
3306			r = TAILQ_NEXT(r, entries);
3307		/* icmp only. type always 0 in other cases */
3308		else if (r->code && r->code != icmpcode + 1)
3309			r = TAILQ_NEXT(r, entries);
3310		else if (r->tos && r->tos != pd->tos)
3311			r = TAILQ_NEXT(r, entries);
3312		else if (r->rule_flag & PFRULE_FRAGMENT)
3313			r = TAILQ_NEXT(r, entries);
3314		else if (pd->proto == IPPROTO_TCP &&
3315		    (r->flagset & th->th_flags) != r->flags)
3316			r = TAILQ_NEXT(r, entries);
3317		/* tcp/udp only. uid.op always 0 in other cases */
3318		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3319		    pf_socket_lookup(direction, pd, m), 1)) &&
3320		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3321		    pd->lookup.uid))
3322			r = TAILQ_NEXT(r, entries);
3323		/* tcp/udp only. gid.op always 0 in other cases */
3324		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3325		    pf_socket_lookup(direction, pd, m), 1)) &&
3326		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3327		    pd->lookup.gid))
3328			r = TAILQ_NEXT(r, entries);
3329		else if (r->prob &&
3330		    r->prob <= arc4random())
3331			r = TAILQ_NEXT(r, entries);
3332		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3333		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3334			r = TAILQ_NEXT(r, entries);
3335		else if (r->os_fingerprint != PF_OSFP_ANY &&
3336		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3337		    pf_osfp_fingerprint(pd, m, off, th),
3338		    r->os_fingerprint)))
3339			r = TAILQ_NEXT(r, entries);
3340		else {
3341			if (r->tag)
3342				tag = r->tag;
3343			if (r->rtableid >= 0)
3344				rtableid = r->rtableid;
3345			if (r->anchor == NULL) {
3346				match = 1;
3347				*rm = r;
3348				*am = a;
3349				*rsm = ruleset;
3350				if ((*rm)->quick)
3351					break;
3352				r = TAILQ_NEXT(r, entries);
3353			} else
3354				pf_step_into_anchor(anchor_stack, &asd,
3355				    &ruleset, PF_RULESET_FILTER, &r, &a,
3356				    &match);
3357		}
3358		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3359		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3360			break;
3361	}
3362	r = *rm;
3363	a = *am;
3364	ruleset = *rsm;
3365
3366	REASON_SET(&reason, PFRES_MATCH);
3367
3368	if (r->log || (nr != NULL && nr->log)) {
3369		if (rewrite)
3370			m_copyback(m, off, hdrlen, pd->hdr.any);
3371		PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3372		    ruleset, pd, 1);
3373	}
3374
3375	if ((r->action == PF_DROP) &&
3376	    ((r->rule_flag & PFRULE_RETURNRST) ||
3377	    (r->rule_flag & PFRULE_RETURNICMP) ||
3378	    (r->rule_flag & PFRULE_RETURN))) {
3379		/* undo NAT changes, if they have taken place */
3380		if (nr != NULL) {
3381			PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3382			PF_ACPY(daddr, &sk->addr[pd->didx], af);
3383			if (pd->sport)
3384				*pd->sport = sk->port[pd->sidx];
3385			if (pd->dport)
3386				*pd->dport = sk->port[pd->didx];
3387			if (pd->proto_sum)
3388				*pd->proto_sum = bproto_sum;
3389			if (pd->ip_sum)
3390				*pd->ip_sum = bip_sum;
3391			m_copyback(m, off, hdrlen, pd->hdr.any);
3392		}
3393		if (pd->proto == IPPROTO_TCP &&
3394		    ((r->rule_flag & PFRULE_RETURNRST) ||
3395		    (r->rule_flag & PFRULE_RETURN)) &&
3396		    !(th->th_flags & TH_RST)) {
3397			u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;
3398			int		 len = 0;
3399#ifdef INET
3400			struct ip	*h4;
3401#endif
3402#ifdef INET6
3403			struct ip6_hdr	*h6;
3404#endif
3405
3406			switch (af) {
3407#ifdef INET
3408			case AF_INET:
3409				h4 = mtod(m, struct ip *);
3410				len = ntohs(h4->ip_len) - off;
3411				break;
3412#endif
3413#ifdef INET6
3414			case AF_INET6:
3415				h6 = mtod(m, struct ip6_hdr *);
3416				len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3417				break;
3418#endif
3419			}
3420
3421			if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3422				REASON_SET(&reason, PFRES_PROTCKSUM);
3423			else {
3424				if (th->th_flags & TH_SYN)
3425					ack++;
3426				if (th->th_flags & TH_FIN)
3427					ack++;
3428				pf_send_tcp(m, r, af, pd->dst,
3429				    pd->src, th->th_dport, th->th_sport,
3430				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3431				    r->return_ttl, 1, 0, kif->pfik_ifp);
3432			}
3433		} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3434		    r->return_icmp)
3435			pf_send_icmp(m, r->return_icmp >> 8,
3436			    r->return_icmp & 255, af, r);
3437		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3438		    r->return_icmp6)
3439			pf_send_icmp(m, r->return_icmp6 >> 8,
3440			    r->return_icmp6 & 255, af, r);
3441	}
3442
3443	if (r->action == PF_DROP)
3444		goto cleanup;
3445
3446	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3447		REASON_SET(&reason, PFRES_MEMORY);
3448		goto cleanup;
3449	}
3450	if (rtableid >= 0)
3451		M_SETFIB(m, rtableid);
3452
3453	if (!state_icmp && (r->keep_state || nr != NULL ||
3454	    (pd->flags & PFDESC_TCP_NORM))) {
3455		int action;
3456		action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3457		    sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3458		    hdrlen);
3459		if (action != PF_PASS)
3460			return (action);
3461	} else {
3462		if (sk != NULL)
3463			uma_zfree(V_pf_state_key_z, sk);
3464		if (nk != NULL)
3465			uma_zfree(V_pf_state_key_z, nk);
3466	}
3467
3468	/* copy back packet headers if we performed NAT operations */
3469	if (rewrite)
3470		m_copyback(m, off, hdrlen, pd->hdr.any);
3471
3472	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3473	    direction == PF_OUT &&
3474	    pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3475		/*
3476		 * We want the state created, but we don't
3477		 * want to send the packet yet, in case a
3478		 * partner firewall must learn about the
3479		 * state first to allow replies through it.
3480		 */
3481		return (PF_DEFER);
3482
3483	return (PF_PASS);
3484
3485cleanup:
3486	if (sk != NULL)
3487		uma_zfree(V_pf_state_key_z, sk);
3488	if (nk != NULL)
3489		uma_zfree(V_pf_state_key_z, nk);
3490	return (PF_DROP);
3491}
3492
3493static int
3494pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3495    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3496    struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3497    u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3498    int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3499{
3500	struct pf_state		*s = NULL;
3501	struct pf_src_node	*sn = NULL;
3502	struct tcphdr		*th = pd->hdr.tcp;
3503	u_int16_t		 mss = V_tcp_mssdflt;
3504	u_short			 reason;
3505
3506	/* check maximums */
3507	if (r->max_states &&
3508	    (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3509		counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3510		REASON_SET(&reason, PFRES_MAXSTATES);
3511		return (PF_DROP);
3512	}
3513	/* src node for filter rule */
3514	if ((r->rule_flag & PFRULE_SRCTRACK ||
3515	    r->rpool.opts & PF_POOL_STICKYADDR) &&
3516	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3517		REASON_SET(&reason, PFRES_SRCLIMIT);
3518		goto csfailed;
3519	}
3520	/* src node for translation rule */
3521	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3522	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3523		REASON_SET(&reason, PFRES_SRCLIMIT);
3524		goto csfailed;
3525	}
3526	s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3527	if (s == NULL) {
3528		REASON_SET(&reason, PFRES_MEMORY);
3529		goto csfailed;
3530	}
3531	s->rule.ptr = r;
3532	s->nat_rule.ptr = nr;
3533	s->anchor.ptr = a;
3534	STATE_INC_COUNTERS(s);
3535	if (r->allow_opts)
3536		s->state_flags |= PFSTATE_ALLOWOPTS;
3537	if (r->rule_flag & PFRULE_STATESLOPPY)
3538		s->state_flags |= PFSTATE_SLOPPY;
3539	s->log = r->log & PF_LOG_ALL;
3540	s->sync_state = PFSYNC_S_NONE;
3541	if (nr != NULL)
3542		s->log |= nr->log & PF_LOG_ALL;
3543	switch (pd->proto) {
3544	case IPPROTO_TCP:
3545		s->src.seqlo = ntohl(th->th_seq);
3546		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3547		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3548		    r->keep_state == PF_STATE_MODULATE) {
3549			/* Generate sequence number modulator */
3550			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3551			    0)
3552				s->src.seqdiff = 1;
3553			pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3554			    htonl(s->src.seqlo + s->src.seqdiff), 0);
3555			*rewrite = 1;
3556		} else
3557			s->src.seqdiff = 0;
3558		if (th->th_flags & TH_SYN) {
3559			s->src.seqhi++;
3560			s->src.wscale = pf_get_wscale(m, off,
3561			    th->th_off, pd->af);
3562		}
3563		s->src.max_win = MAX(ntohs(th->th_win), 1);
3564		if (s->src.wscale & PF_WSCALE_MASK) {
3565			/* Remove scale factor from initial window */
3566			int win = s->src.max_win;
3567			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3568			s->src.max_win = (win - 1) >>
3569			    (s->src.wscale & PF_WSCALE_MASK);
3570		}
3571		if (th->th_flags & TH_FIN)
3572			s->src.seqhi++;
3573		s->dst.seqhi = 1;
3574		s->dst.max_win = 1;
3575		s->src.state = TCPS_SYN_SENT;
3576		s->dst.state = TCPS_CLOSED;
3577		s->timeout = PFTM_TCP_FIRST_PACKET;
3578		break;
3579	case IPPROTO_UDP:
3580		s->src.state = PFUDPS_SINGLE;
3581		s->dst.state = PFUDPS_NO_TRAFFIC;
3582		s->timeout = PFTM_UDP_FIRST_PACKET;
3583		break;
3584	case IPPROTO_ICMP:
3585#ifdef INET6
3586	case IPPROTO_ICMPV6:
3587#endif
3588		s->timeout = PFTM_ICMP_FIRST_PACKET;
3589		break;
3590	default:
3591		s->src.state = PFOTHERS_SINGLE;
3592		s->dst.state = PFOTHERS_NO_TRAFFIC;
3593		s->timeout = PFTM_OTHER_FIRST_PACKET;
3594	}
3595
3596	if (r->rt && r->rt != PF_FASTROUTE) {
3597		if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3598			REASON_SET(&reason, PFRES_BADSTATE);
3599			pf_src_tree_remove_state(s);
3600			STATE_DEC_COUNTERS(s);
3601			uma_zfree(V_pf_state_z, s);
3602			goto csfailed;
3603		}
3604		s->rt_kif = r->rpool.cur->kif;
3605	}
3606
3607	s->creation = time_uptime;
3608	s->expire = time_uptime;
3609
3610	if (sn != NULL)
3611		s->src_node = sn;
3612	if (nsn != NULL) {
3613		/* XXX We only modify one side for now. */
3614		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3615		s->nat_src_node = nsn;
3616	}
3617	if (pd->proto == IPPROTO_TCP) {
3618		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3619		    off, pd, th, &s->src, &s->dst)) {
3620			REASON_SET(&reason, PFRES_MEMORY);
3621			pf_src_tree_remove_state(s);
3622			STATE_DEC_COUNTERS(s);
3623			uma_zfree(V_pf_state_z, s);
3624			return (PF_DROP);
3625		}
3626		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3627		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3628		    &s->src, &s->dst, rewrite)) {
3629			/* This really shouldn't happen!!! */
3630			DPFPRINTF(PF_DEBUG_URGENT,
3631			    ("pf_normalize_tcp_stateful failed on first pkt\n"));
3632			pf_normalize_tcp_cleanup(s);
3633			pf_src_tree_remove_state(s);
3634			STATE_DEC_COUNTERS(s);
3635			uma_zfree(V_pf_state_z, s);
3636			return (PF_DROP);
3637		}
3638	}
3639	s->direction = pd->dir;
3640
3641	/*
3642	 * sk/nk may already have been set up by pf_get_translation().
3643	 */
3644	if (nr == NULL) {
3645		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3646		    __func__, nr, sk, nk));
3647		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3648		if (sk == NULL)
3649			goto csfailed;
3650		nk = sk;
3651	} else
3652		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3653		    __func__, nr, sk, nk));
3654
3655	/* Swap sk/nk for PF_OUT. */
3656	if (pf_state_insert(BOUND_IFACE(r, kif),
3657	    (pd->dir == PF_IN) ? sk : nk,
3658	    (pd->dir == PF_IN) ? nk : sk, s)) {
3659		if (pd->proto == IPPROTO_TCP)
3660			pf_normalize_tcp_cleanup(s);
3661		REASON_SET(&reason, PFRES_STATEINS);
3662		pf_src_tree_remove_state(s);
3663		STATE_DEC_COUNTERS(s);
3664		uma_zfree(V_pf_state_z, s);
3665		return (PF_DROP);
3666	} else
3667		*sm = s;
3668
3669	if (tag > 0)
3670		s->tag = tag;
3671	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3672	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3673		s->src.state = PF_TCPS_PROXY_SRC;
3674		/* undo NAT changes, if they have taken place */
3675		if (nr != NULL) {
3676			struct pf_state_key *skt = s->key[PF_SK_WIRE];
3677			if (pd->dir == PF_OUT)
3678				skt = s->key[PF_SK_STACK];
3679			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3680			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3681			if (pd->sport)
3682				*pd->sport = skt->port[pd->sidx];
3683			if (pd->dport)
3684				*pd->dport = skt->port[pd->didx];
3685			if (pd->proto_sum)
3686				*pd->proto_sum = bproto_sum;
3687			if (pd->ip_sum)
3688				*pd->ip_sum = bip_sum;
3689			m_copyback(m, off, hdrlen, pd->hdr.any);
3690		}
3691		s->src.seqhi = htonl(arc4random());
3692		/* Find mss option */
3693		int rtid = M_GETFIB(m);
3694		mss = pf_get_mss(m, off, th->th_off, pd->af);
3695		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3696		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3697		s->src.mss = mss;
3698		pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3699		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3700		    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3701		REASON_SET(&reason, PFRES_SYNPROXY);
3702		return (PF_SYNPROXY_DROP);
3703	}
3704
3705	return (PF_PASS);
3706
3707csfailed:
3708	if (sk != NULL)
3709		uma_zfree(V_pf_state_key_z, sk);
3710	if (nk != NULL)
3711		uma_zfree(V_pf_state_key_z, nk);
3712
3713	if (sn != NULL) {
3714		struct pf_srchash *sh;
3715
3716		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3717		PF_HASHROW_LOCK(sh);
3718		if (--sn->states == 0 && sn->expire == 0) {
3719			pf_unlink_src_node(sn);
3720			uma_zfree(V_pf_sources_z, sn);
3721			counter_u64_add(
3722			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3723		}
3724		PF_HASHROW_UNLOCK(sh);
3725	}
3726
3727	if (nsn != sn && nsn != NULL) {
3728		struct pf_srchash *sh;
3729
3730		sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3731		PF_HASHROW_LOCK(sh);
3732		if (--nsn->states == 0 && nsn->expire == 0) {
3733			pf_unlink_src_node(nsn);
3734			uma_zfree(V_pf_sources_z, nsn);
3735			counter_u64_add(
3736			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3737		}
3738		PF_HASHROW_UNLOCK(sh);
3739	}
3740
3741	return (PF_DROP);
3742}
3743
3744static int
3745pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3746    struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3747    struct pf_ruleset **rsm)
3748{
3749	struct pf_rule		*r, *a = NULL;
3750	struct pf_ruleset	*ruleset = NULL;
3751	sa_family_t		 af = pd->af;
3752	u_short			 reason;
3753	int			 tag = -1;
3754	int			 asd = 0;
3755	int			 match = 0;
3756	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
3757
3758	PF_RULES_RASSERT();
3759
3760	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3761	while (r != NULL) {
3762		r->evaluations++;
3763		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3764			r = r->skip[PF_SKIP_IFP].ptr;
3765		else if (r->direction && r->direction != direction)
3766			r = r->skip[PF_SKIP_DIR].ptr;
3767		else if (r->af && r->af != af)
3768			r = r->skip[PF_SKIP_AF].ptr;
3769		else if (r->proto && r->proto != pd->proto)
3770			r = r->skip[PF_SKIP_PROTO].ptr;
3771		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3772		    r->src.neg, kif, M_GETFIB(m)))
3773			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3774		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3775		    r->dst.neg, NULL, M_GETFIB(m)))
3776			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3777		else if (r->tos && r->tos != pd->tos)
3778			r = TAILQ_NEXT(r, entries);
3779		else if (r->os_fingerprint != PF_OSFP_ANY)
3780			r = TAILQ_NEXT(r, entries);
3781		else if (pd->proto == IPPROTO_UDP &&
3782		    (r->src.port_op || r->dst.port_op))
3783			r = TAILQ_NEXT(r, entries);
3784		else if (pd->proto == IPPROTO_TCP &&
3785		    (r->src.port_op || r->dst.port_op || r->flagset))
3786			r = TAILQ_NEXT(r, entries);
3787		else if ((pd->proto == IPPROTO_ICMP ||
3788		    pd->proto == IPPROTO_ICMPV6) &&
3789		    (r->type || r->code))
3790			r = TAILQ_NEXT(r, entries);
3791		else if (r->prob && r->prob <=
3792		    (arc4random() % (UINT_MAX - 1) + 1))
3793			r = TAILQ_NEXT(r, entries);
3794		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3795		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3796			r = TAILQ_NEXT(r, entries);
3797		else {
3798			if (r->anchor == NULL) {
3799				match = 1;
3800				*rm = r;
3801				*am = a;
3802				*rsm = ruleset;
3803				if ((*rm)->quick)
3804					break;
3805				r = TAILQ_NEXT(r, entries);
3806			} else
3807				pf_step_into_anchor(anchor_stack, &asd,
3808				    &ruleset, PF_RULESET_FILTER, &r, &a,
3809				    &match);
3810		}
3811		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3812		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3813			break;
3814	}
3815	r = *rm;
3816	a = *am;
3817	ruleset = *rsm;
3818
3819	REASON_SET(&reason, PFRES_MATCH);
3820
3821	if (r->log)
3822		PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3823		    1);
3824
3825	if (r->action != PF_PASS)
3826		return (PF_DROP);
3827
3828	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3829		REASON_SET(&reason, PFRES_MEMORY);
3830		return (PF_DROP);
3831	}
3832
3833	return (PF_PASS);
3834}
3835
3836static int
3837pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3838	struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3839	struct pf_pdesc *pd, u_short *reason, int *copyback)
3840{
3841	struct tcphdr		*th = pd->hdr.tcp;
3842	u_int16_t		 win = ntohs(th->th_win);
3843	u_int32_t		 ack, end, seq, orig_seq;
3844	u_int8_t		 sws, dws;
3845	int			 ackskew;
3846
3847	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3848		sws = src->wscale & PF_WSCALE_MASK;
3849		dws = dst->wscale & PF_WSCALE_MASK;
3850	} else
3851		sws = dws = 0;
3852
3853	/*
3854	 * Sequence tracking algorithm from Guido van Rooij's paper:
3855	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
3856	 *	tcp_filtering.ps
3857	 */
3858
3859	orig_seq = seq = ntohl(th->th_seq);
3860	if (src->seqlo == 0) {
3861		/* First packet from this end. Set its state */
3862
3863		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3864		    src->scrub == NULL) {
3865			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3866				REASON_SET(reason, PFRES_MEMORY);
3867				return (PF_DROP);
3868			}
3869		}
3870
3871		/* Deferred generation of sequence number modulator */
3872		if (dst->seqdiff && !src->seqdiff) {
3873			/* use random iss for the TCP server */
3874			while ((src->seqdiff = arc4random() - seq) == 0)
3875				;
3876			ack = ntohl(th->th_ack) - dst->seqdiff;
3877			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3878			    src->seqdiff), 0);
3879			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3880			*copyback = 1;
3881		} else {
3882			ack = ntohl(th->th_ack);
3883		}
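		/*
		 * From here on, this side's segments carry seq + src->seqdiff
		 * on the wire while pf keeps tracking the original values,
		 * and their ACK field is demodulated by dst->seqdiff.  With
		 * assumed values seq = 1000 and src->seqdiff = 5000, th_seq
		 * is rewritten to 6000 in the forwarded packet.
		 */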
3884
3885		end = seq + pd->p_len;
3886		if (th->th_flags & TH_SYN) {
3887			end++;
3888			if (dst->wscale & PF_WSCALE_FLAG) {
3889				src->wscale = pf_get_wscale(m, off, th->th_off,
3890				    pd->af);
3891				if (src->wscale & PF_WSCALE_FLAG) {
3892					/* Remove scale factor from initial
3893					 * window */
3894					sws = src->wscale & PF_WSCALE_MASK;
3895					win = ((u_int32_t)win + (1 << sws) - 1)
3896					    >> sws;
3897					dws = dst->wscale & PF_WSCALE_MASK;
3898				} else {
3899					/* fixup other window */
3900					dst->max_win <<= dst->wscale &
3901					    PF_WSCALE_MASK;
3902					/* in case of a retrans SYN|ACK */
3903					dst->wscale = 0;
3904				}
3905			}
3906		}
3907		if (th->th_flags & TH_FIN)
3908			end++;
3909
3910		src->seqlo = seq;
3911		if (src->state < TCPS_SYN_SENT)
3912			src->state = TCPS_SYN_SENT;
3913
3914		/*
3915		 * May need to slide the window (seqhi may have been set by
3916		 * the crappy stack check or if we picked up the connection
3917		 * after establishment)
3918		 */
3919		if (src->seqhi == 1 ||
3920		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3921			src->seqhi = end + MAX(1, dst->max_win << dws);
3922		if (win > src->max_win)
3923			src->max_win = win;
3924
3925	} else {
3926		ack = ntohl(th->th_ack) - dst->seqdiff;
3927		if (src->seqdiff) {
3928			/* Modulate sequence numbers */
3929			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3930			    src->seqdiff), 0);
3931			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3932			*copyback = 1;
3933		}
3934		end = seq + pd->p_len;
3935		if (th->th_flags & TH_SYN)
3936			end++;
3937		if (th->th_flags & TH_FIN)
3938			end++;
3939	}
3940
3941	if ((th->th_flags & TH_ACK) == 0) {
3942		/* Let it pass through the ack skew check */
3943		ack = dst->seqlo;
3944	} else if ((ack == 0 &&
3945	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3946	    /* broken tcp stacks do not set ack */
3947	    (dst->state < TCPS_SYN_SENT)) {
3948		/*
3949		 * Many stacks (ours included) will set the ACK number in a
3950		 * FIN|ACK if the SYN times out -- no sequence to ACK.
3951		 */
3952		ack = dst->seqlo;
3953	}
3954
3955	if (seq == end) {
3956		/* Ease sequencing restrictions on no-data packets */
3957		seq = src->seqlo;
3958		end = seq;
3959	}
3960
3961	ackskew = dst->seqlo - ack;
3962
3963
3964	/*
3965	 * Need to demodulate the sequence numbers in any TCP SACK options
3966	 * (Selective ACK). We could optionally validate the SACK values
3967	 * against the current ACK window, either forwards or backwards, but
3968	 * I'm not confident that SACK has been implemented properly
3969	 * everywhere. It wouldn't surprise me if several stacks accidentally
3970	 * SACK too far backwards of previously ACKed data. There really aren't
3971	 * any security implications of bad SACKing unless the target stack
3972	 * doesn't validate the option length correctly. Someone trying to
3973	 * spoof into a TCP connection won't bother blindly sending SACK
3974	 * options anyway.
3975	 */
3976	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3977		if (pf_modulate_sack(m, off, pd, th, dst))
3978			*copyback = 1;
3979	}
3980
3981
3982#define	MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
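	/*
	 * The acceptance test below is the heart of the algorithm: the
	 * segment must fit the tracked windows in both sequence and ACK
	 * space, with a stricter exact/+1 match demanded of RSTs.
	 * Segments that fail it fall through to the loose-match and
	 * failure branches further down.
	 */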
3983	if (SEQ_GEQ(src->seqhi, end) &&
3984	    /* Last octet inside other's window space */
3985	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3986	    /* Retrans: not more than one window back */
3987	    (ackskew >= -MAXACKWINDOW) &&
3988	    /* Acking not more than one reassembled fragment backwards */
3989	    (ackskew <= (MAXACKWINDOW << sws)) &&
3990	    /* Acking not more than one window forward */
3991	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3992	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3993	    (pd->flags & PFDESC_IP_REAS) == 0)) {
3994	    /* Require an exact/+1 sequence match on resets when possible */
3995
3996		if (dst->scrub || src->scrub) {
3997			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3998			    *state, src, dst, copyback))
3999				return (PF_DROP);
4000		}
4001
4002		/* update max window */
4003		if (src->max_win < win)
4004			src->max_win = win;
4005		/* synchronize sequencing */
4006		if (SEQ_GT(end, src->seqlo))
4007			src->seqlo = end;
4008		/* slide the window of what the other end can send */
4009		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4010			dst->seqhi = ack + MAX((win << sws), 1);
4011
4012
4013		/* update states */
4014		if (th->th_flags & TH_SYN)
4015			if (src->state < TCPS_SYN_SENT)
4016				src->state = TCPS_SYN_SENT;
4017		if (th->th_flags & TH_FIN)
4018			if (src->state < TCPS_CLOSING)
4019				src->state = TCPS_CLOSING;
4020		if (th->th_flags & TH_ACK) {
4021			if (dst->state == TCPS_SYN_SENT) {
4022				dst->state = TCPS_ESTABLISHED;
4023				if (src->state == TCPS_ESTABLISHED &&
4024				    (*state)->src_node != NULL &&
4025				    pf_src_connlimit(state)) {
4026					REASON_SET(reason, PFRES_SRCLIMIT);
4027					return (PF_DROP);
4028				}
4029			} else if (dst->state == TCPS_CLOSING)
4030				dst->state = TCPS_FIN_WAIT_2;
4031		}
4032		if (th->th_flags & TH_RST)
4033			src->state = dst->state = TCPS_TIME_WAIT;
4034
4035		/* update expire time */
4036		(*state)->expire = time_uptime;
4037		if (src->state >= TCPS_FIN_WAIT_2 &&
4038		    dst->state >= TCPS_FIN_WAIT_2)
4039			(*state)->timeout = PFTM_TCP_CLOSED;
4040		else if (src->state >= TCPS_CLOSING &&
4041		    dst->state >= TCPS_CLOSING)
4042			(*state)->timeout = PFTM_TCP_FIN_WAIT;
4043		else if (src->state < TCPS_ESTABLISHED ||
4044		    dst->state < TCPS_ESTABLISHED)
4045			(*state)->timeout = PFTM_TCP_OPENING;
4046		else if (src->state >= TCPS_CLOSING ||
4047		    dst->state >= TCPS_CLOSING)
4048			(*state)->timeout = PFTM_TCP_CLOSING;
4049		else
4050			(*state)->timeout = PFTM_TCP_ESTABLISHED;
4051
4052		/* Fall through to PASS packet */
4053
4054	} else if ((dst->state < TCPS_SYN_SENT ||
4055		dst->state >= TCPS_FIN_WAIT_2 ||
4056		src->state >= TCPS_FIN_WAIT_2) &&
4057	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4058	    /* Within a window forward of the originating packet */
4059	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4060	    /* Within a window backward of the originating packet */
4061
4062		/*
4063		 * This currently handles three situations:
4064		 *  1) Stupid stacks will shotgun SYNs before their peer
4065		 *     replies.
4066		 *  2) When PF catches an already established stream (the
4067		 *     firewall rebooted, the state table was flushed, routes
4068		 *     changed...)
4069		 *  3) Packets get funky immediately after the connection
4070		 *     closes (this should catch Solaris spurious ACK|FINs
4071		 *     that web servers like to spew after a close)
4072		 *
4073		 * This must be a little more careful than the above code
4074		 * since packet floods will also be caught here. We don't
4075		 * update the TTL here to mitigate the damage of a packet
4076		 * flood and so the same code can handle awkward establishment
4077		 * and a loosened connection close.
4078		 * In the establishment case, a correct peer response will
4079		 * validate the connection, go through the normal state code
4080		 * and keep updating the state TTL.
4081		 */
4082
4083		if (V_pf_status.debug >= PF_DEBUG_MISC) {
4084			printf("pf: loose state match: ");
4085			pf_print_state(*state);
4086			pf_print_flags(th->th_flags);
4087			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4088			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4089			    pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4090			    (unsigned long long)(*state)->packets[1],
4091			    pd->dir == PF_IN ? "in" : "out",
4092			    pd->dir == (*state)->direction ? "fwd" : "rev");
4093		}
4094
4095		if (dst->scrub || src->scrub) {
4096			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4097			    *state, src, dst, copyback))
4098				return (PF_DROP);
4099		}
4100
4101		/* update max window */
4102		if (src->max_win < win)
4103			src->max_win = win;
4104		/* synchronize sequencing */
4105		if (SEQ_GT(end, src->seqlo))
4106			src->seqlo = end;
4107		/* slide the window of what the other end can send */
4108		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4109			dst->seqhi = ack + MAX((win << sws), 1);
4110
4111		/*
4112		 * Cannot set dst->seqhi here since this could be a shotgunned
4113		 * SYN and not an already established connection.
4114		 */
4115
4116		if (th->th_flags & TH_FIN)
4117			if (src->state < TCPS_CLOSING)
4118				src->state = TCPS_CLOSING;
4119		if (th->th_flags & TH_RST)
4120			src->state = dst->state = TCPS_TIME_WAIT;
4121
4122		/* Fall through to PASS packet */
4123
4124	} else {
4125		if ((*state)->dst.state == TCPS_SYN_SENT &&
4126		    (*state)->src.state == TCPS_SYN_SENT) {
4127			/* Send RST for state mismatches during handshake */
4128			if (!(th->th_flags & TH_RST))
4129				pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4130				    pd->dst, pd->src, th->th_dport,
4131				    th->th_sport, ntohl(th->th_ack), 0,
4132				    TH_RST, 0, 0,
4133				    (*state)->rule.ptr->return_ttl, 1, 0,
4134				    kif->pfik_ifp);
4135			src->seqlo = 0;
4136			src->seqhi = 1;
4137			src->max_win = 1;
4138		} else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4139			printf("pf: BAD state: ");
4140			pf_print_state(*state);
4141			pf_print_flags(th->th_flags);
4142			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4143			    "pkts=%llu:%llu dir=%s,%s\n",
4144			    seq, orig_seq, ack, pd->p_len, ackskew,
4145			    (unsigned long long)(*state)->packets[0],
4146			    (unsigned long long)(*state)->packets[1],
4147			    pd->dir == PF_IN ? "in" : "out",
4148			    pd->dir == (*state)->direction ? "fwd" : "rev");
4149			printf("pf: State failure on: %c %c %c %c | %c %c\n",
4150			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4151			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4152			    ' ': '2',
4153			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4154			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4155			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4156			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4157		}
4158		REASON_SET(reason, PFRES_BADSTATE);
4159		return (PF_DROP);
4160	}
4161
4162	return (PF_PASS);
4163}
4164
4165static int
4166pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4167	struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4168{
4169	struct tcphdr		*th = pd->hdr.tcp;
4170
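	/*
	 * Sloppy tracking: only the TCP flags drive the state machine;
	 * no sequence or window validation is done, which lets this work
	 * even when pf sees just one half of an asymmetrically routed
	 * connection.
	 */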
4171	if (th->th_flags & TH_SYN)
4172		if (src->state < TCPS_SYN_SENT)
4173			src->state = TCPS_SYN_SENT;
4174	if (th->th_flags & TH_FIN)
4175		if (src->state < TCPS_CLOSING)
4176			src->state = TCPS_CLOSING;
4177	if (th->th_flags & TH_ACK) {
4178		if (dst->state == TCPS_SYN_SENT) {
4179			dst->state = TCPS_ESTABLISHED;
4180			if (src->state == TCPS_ESTABLISHED &&
4181			    (*state)->src_node != NULL &&
4182			    pf_src_connlimit(state)) {
4183				REASON_SET(reason, PFRES_SRCLIMIT);
4184				return (PF_DROP);
4185			}
4186		} else if (dst->state == TCPS_CLOSING) {
4187			dst->state = TCPS_FIN_WAIT_2;
4188		} else if (src->state == TCPS_SYN_SENT &&
4189		    dst->state < TCPS_SYN_SENT) {
4190			/*
4191			 * Handle a special sloppy case where we only see one
4192			 * half of the connection. If there is an ACK after
4193			 * the initial SYN without ever seeing a packet from
4194			 * the destination, set the connection to established.
4195			 */
4196			dst->state = src->state = TCPS_ESTABLISHED;
4197			if ((*state)->src_node != NULL &&
4198			    pf_src_connlimit(state)) {
4199				REASON_SET(reason, PFRES_SRCLIMIT);
4200				return (PF_DROP);
4201			}
4202		} else if (src->state == TCPS_CLOSING &&
4203		    dst->state == TCPS_ESTABLISHED &&
4204		    dst->seqlo == 0) {
4205			/*
4206			 * Handle the closing of half connections where we
4207			 * don't see the full bidirectional FIN/ACK+ACK
4208			 * handshake.
4209			 */
4210			dst->state = TCPS_CLOSING;
4211		}
4212	}
4213	if (th->th_flags & TH_RST)
4214		src->state = dst->state = TCPS_TIME_WAIT;
4215
4216	/* update expire time */
4217	(*state)->expire = time_uptime;
4218	if (src->state >= TCPS_FIN_WAIT_2 &&
4219	    dst->state >= TCPS_FIN_WAIT_2)
4220		(*state)->timeout = PFTM_TCP_CLOSED;
4221	else if (src->state >= TCPS_CLOSING &&
4222	    dst->state >= TCPS_CLOSING)
4223		(*state)->timeout = PFTM_TCP_FIN_WAIT;
4224	else if (src->state < TCPS_ESTABLISHED ||
4225	    dst->state < TCPS_ESTABLISHED)
4226		(*state)->timeout = PFTM_TCP_OPENING;
4227	else if (src->state >= TCPS_CLOSING ||
4228	    dst->state >= TCPS_CLOSING)
4229		(*state)->timeout = PFTM_TCP_CLOSING;
4230	else
4231		(*state)->timeout = PFTM_TCP_ESTABLISHED;
4232
4233	return (PF_PASS);
4234}
4235
4236static int
4237pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4238    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4239    u_short *reason)
4240{
4241	struct pf_state_key_cmp	 key;
4242	struct tcphdr		*th = pd->hdr.tcp;
4243	int			 copyback = 0;
4244	struct pf_state_peer	*src, *dst;
4245	struct pf_state_key	*sk;
4246
4247	bzero(&key, sizeof(key));
4248	key.af = pd->af;
4249	key.proto = IPPROTO_TCP;
4250	if (direction == PF_IN)	{	/* wire side, straight */
4251		PF_ACPY(&key.addr[0], pd->src, key.af);
4252		PF_ACPY(&key.addr[1], pd->dst, key.af);
4253		key.port[0] = th->th_sport;
4254		key.port[1] = th->th_dport;
4255	} else {			/* stack side, reverse */
4256		PF_ACPY(&key.addr[1], pd->src, key.af);
4257		PF_ACPY(&key.addr[0], pd->dst, key.af);
4258		key.port[1] = th->th_sport;
4259		key.port[0] = th->th_dport;
4260	}
4261
4262	STATE_LOOKUP(kif, &key, direction, *state, pd);
4263
4264	if (direction == (*state)->direction) {
4265		src = &(*state)->src;
4266		dst = &(*state)->dst;
4267	} else {
4268		src = &(*state)->dst;
4269		dst = &(*state)->src;
4270	}
4271
4272	sk = (*state)->key[pd->didx];
4273
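	/*
	 * Synproxy: in PF_TCPS_PROXY_SRC pf answers the client's SYN
	 * itself, using src.seqhi as its own ISN, and completes only the
	 * client-side handshake.  Once the client's final ACK checks out,
	 * the state moves to PF_TCPS_PROXY_DST: pf opens the real
	 * connection to the server, then splices the two half-connections
	 * together by installing seqdiff modulators for both directions.
	 */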
4274	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4275		if (direction != (*state)->direction) {
4276			REASON_SET(reason, PFRES_SYNPROXY);
4277			return (PF_SYNPROXY_DROP);
4278		}
4279		if (th->th_flags & TH_SYN) {
4280			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4281				REASON_SET(reason, PFRES_SYNPROXY);
4282				return (PF_DROP);
4283			}
4284			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4285			    pd->src, th->th_dport, th->th_sport,
4286			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4287			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4288			REASON_SET(reason, PFRES_SYNPROXY);
4289			return (PF_SYNPROXY_DROP);
4290		} else if (!(th->th_flags & TH_ACK) ||
4291		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4292		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4293			REASON_SET(reason, PFRES_SYNPROXY);
4294			return (PF_DROP);
4295		} else if ((*state)->src_node != NULL &&
4296		    pf_src_connlimit(state)) {
4297			REASON_SET(reason, PFRES_SRCLIMIT);
4298			return (PF_DROP);
4299		} else
4300			(*state)->src.state = PF_TCPS_PROXY_DST;
4301	}
4302	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4303		if (direction == (*state)->direction) {
4304			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4305			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4306			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4307				REASON_SET(reason, PFRES_SYNPROXY);
4308				return (PF_DROP);
4309			}
4310			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4311			if ((*state)->dst.seqhi == 1)
4312				(*state)->dst.seqhi = htonl(arc4random());
4313			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4314			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4315			    sk->port[pd->sidx], sk->port[pd->didx],
4316			    (*state)->dst.seqhi, 0, TH_SYN, 0,
4317			    (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4318			REASON_SET(reason, PFRES_SYNPROXY);
4319			return (PF_SYNPROXY_DROP);
4320		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4321		    (TH_SYN|TH_ACK)) ||
4322		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4323			REASON_SET(reason, PFRES_SYNPROXY);
4324			return (PF_DROP);
4325		} else {
4326			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4327			(*state)->dst.seqlo = ntohl(th->th_seq);
4328			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4329			    pd->src, th->th_dport, th->th_sport,
4330			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4331			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
4332			    (*state)->tag, NULL);
4333			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4334			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4335			    sk->port[pd->sidx], sk->port[pd->didx],
4336			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4337			    TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4338			(*state)->src.seqdiff = (*state)->dst.seqhi -
4339			    (*state)->src.seqlo;
4340			(*state)->dst.seqdiff = (*state)->src.seqhi -
4341			    (*state)->dst.seqlo;
4342			(*state)->src.seqhi = (*state)->src.seqlo +
4343			    (*state)->dst.max_win;
4344			(*state)->dst.seqhi = (*state)->dst.seqlo +
4345			    (*state)->src.max_win;
4346			(*state)->src.wscale = (*state)->dst.wscale = 0;
4347			(*state)->src.state = (*state)->dst.state =
4348			    TCPS_ESTABLISHED;
4349			REASON_SET(reason, PFRES_SYNPROXY);
4350			return (PF_SYNPROXY_DROP);
4351		}
4352	}
4353
4354	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4355	    dst->state >= TCPS_FIN_WAIT_2 &&
4356	    src->state >= TCPS_FIN_WAIT_2) {
4357		if (V_pf_status.debug >= PF_DEBUG_MISC) {
4358			printf("pf: state reuse ");
4359			pf_print_state(*state);
4360			pf_print_flags(th->th_flags);
4361			printf("\n");
4362		}
4363		/* XXX make sure it's the same direction ?? */
4364		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4365		pf_unlink_state(*state, PF_ENTER_LOCKED);
4366		*state = NULL;
4367		return (PF_DROP);
4368	}
4369
4370	if ((*state)->state_flags & PFSTATE_SLOPPY) {
4371		if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4372			return (PF_DROP);
4373	} else {
4374		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4375		    &copyback) == PF_DROP)
4376			return (PF_DROP);
4377	}
4378
4379	/* translate source/destination address, if necessary */
4380	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4381		struct pf_state_key *nk = (*state)->key[pd->didx];
4382
4383		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4384		    nk->port[pd->sidx] != th->th_sport)
4385			pf_change_ap(m, pd->src, &th->th_sport,
4386			    pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4387			    nk->port[pd->sidx], 0, pd->af);
4388
4389		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4390		    nk->port[pd->didx] != th->th_dport)
4391			pf_change_ap(m, pd->dst, &th->th_dport,
4392			    pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4393			    nk->port[pd->didx], 0, pd->af);
4394		copyback = 1;
4395	}
4396
4397	/* Copyback sequence modulation or stateful scrub changes if needed */
4398	if (copyback)
4399		m_copyback(m, off, sizeof(*th), (caddr_t)th);
4400
4401	return (PF_PASS);
4402}
4403
4404static int
4405pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4406    struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4407{
4408	struct pf_state_peer	*src, *dst;
4409	struct pf_state_key_cmp	 key;
4410	struct udphdr		*uh = pd->hdr.udp;
4411
4412	bzero(&key, sizeof(key));
4413	key.af = pd->af;
4414	key.proto = IPPROTO_UDP;
4415	if (direction == PF_IN)	{	/* wire side, straight */
4416		PF_ACPY(&key.addr[0], pd->src, key.af);
4417		PF_ACPY(&key.addr[1], pd->dst, key.af);
4418		key.port[0] = uh->uh_sport;
4419		key.port[1] = uh->uh_dport;
4420	} else {			/* stack side, reverse */
4421		PF_ACPY(&key.addr[1], pd->src, key.af);
4422		PF_ACPY(&key.addr[0], pd->dst, key.af);
4423		key.port[1] = uh->uh_sport;
4424		key.port[0] = uh->uh_dport;
4425	}
4426
4427	STATE_LOOKUP(kif, &key, direction, *state, pd);
4428
4429	if (direction == (*state)->direction) {
4430		src = &(*state)->src;
4431		dst = &(*state)->dst;
4432	} else {
4433		src = &(*state)->dst;
4434		dst = &(*state)->src;
4435	}
4436
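	/*
	 * UDP has no real connection state, so pf approximates one:
	 * PFUDPS_SINGLE after traffic in one direction only, and
	 * PFUDPS_MULTIPLE once both sides have sent, which also selects
	 * the longer PFTM_UDP_MULTIPLE timeout below.
	 */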
4437	/* update states */
4438	if (src->state < PFUDPS_SINGLE)
4439		src->state = PFUDPS_SINGLE;
4440	if (dst->state == PFUDPS_SINGLE)
4441		dst->state = PFUDPS_MULTIPLE;
4442
4443	/* update expire time */
4444	(*state)->expire = time_uptime;
4445	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4446		(*state)->timeout = PFTM_UDP_MULTIPLE;
4447	else
4448		(*state)->timeout = PFTM_UDP_SINGLE;
4449
4450	/* translate source/destination address, if necessary */
4451	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4452		struct pf_state_key *nk = (*state)->key[pd->didx];
4453
4454		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4455		    nk->port[pd->sidx] != uh->uh_sport)
4456			pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4457			    &uh->uh_sum, &nk->addr[pd->sidx],
4458			    nk->port[pd->sidx], 1, pd->af);
4459
4460		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4461		    nk->port[pd->didx] != uh->uh_dport)
4462			pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4463			    &uh->uh_sum, &nk->addr[pd->didx],
4464			    nk->port[pd->didx], 1, pd->af);
4465		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4466	}
4467
4468	return (PF_PASS);
4469}
4470
4471static int
4472pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4473    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4474{
4475	struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
4476	u_int16_t	 icmpid = 0, *icmpsum;
4477	u_int8_t	 icmptype;
4478	int		 state_icmp = 0;
4479	struct pf_state_key_cmp key;
4480
4481	bzero(&key, sizeof(key));
4482	switch (pd->proto) {
4483#ifdef INET
4484	case IPPROTO_ICMP:
4485		icmptype = pd->hdr.icmp->icmp_type;
4486		icmpid = pd->hdr.icmp->icmp_id;
4487		icmpsum = &pd->hdr.icmp->icmp_cksum;
4488
4489		if (icmptype == ICMP_UNREACH ||
4490		    icmptype == ICMP_SOURCEQUENCH ||
4491		    icmptype == ICMP_REDIRECT ||
4492		    icmptype == ICMP_TIMXCEED ||
4493		    icmptype == ICMP_PARAMPROB)
4494			state_icmp++;
4495		break;
4496#endif /* INET */
4497#ifdef INET6
4498	case IPPROTO_ICMPV6:
4499		icmptype = pd->hdr.icmp6->icmp6_type;
4500		icmpid = pd->hdr.icmp6->icmp6_id;
4501		icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4502
4503		if (icmptype == ICMP6_DST_UNREACH ||
4504		    icmptype == ICMP6_PACKET_TOO_BIG ||
4505		    icmptype == ICMP6_TIME_EXCEEDED ||
4506		    icmptype == ICMP6_PARAM_PROB)
4507			state_icmp++;
4508		break;
4509#endif /* INET6 */
4510	}
4511
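	/*
	 * The types tested above are the ICMP/ICMPv6 error messages; for
	 * those, state_icmp is set and the lookup keys on the embedded
	 * packet that triggered the error rather than on the ICMP header
	 * itself.
	 */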
4512	if (!state_icmp) {
4513
4514		/*
4515		 * ICMP query/reply message not related to a TCP/UDP packet.
4516		 * Search for an ICMP state.
4517		 */
4518		key.af = pd->af;
4519		key.proto = pd->proto;
4520		key.port[0] = key.port[1] = icmpid;
4521		if (direction == PF_IN)	{	/* wire side, straight */
4522			PF_ACPY(&key.addr[0], pd->src, key.af);
4523			PF_ACPY(&key.addr[1], pd->dst, key.af);
4524		} else {			/* stack side, reverse */
4525			PF_ACPY(&key.addr[1], pd->src, key.af);
4526			PF_ACPY(&key.addr[0], pd->dst, key.af);
4527		}
4528
4529		STATE_LOOKUP(kif, &key, direction, *state, pd);
4530
4531		(*state)->expire = time_uptime;
4532		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4533
4534		/* translate source/destination address, if necessary */
4535		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4536			struct pf_state_key *nk = (*state)->key[pd->didx];
4537
4538			switch (pd->af) {
4539#ifdef INET
4540			case AF_INET:
4541				if (PF_ANEQ(pd->src,
4542				    &nk->addr[pd->sidx], AF_INET))
4543					pf_change_a(&saddr->v4.s_addr,
4544					    pd->ip_sum,
4545					    nk->addr[pd->sidx].v4.s_addr, 0);
4546
4547				if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4548				    AF_INET))
4549					pf_change_a(&daddr->v4.s_addr,
4550					    pd->ip_sum,
4551					    nk->addr[pd->didx].v4.s_addr, 0);
4552
4553				if (nk->port[0] !=
4554				    pd->hdr.icmp->icmp_id) {
4555					pd->hdr.icmp->icmp_cksum =
4556					    pf_cksum_fixup(
4557					    pd->hdr.icmp->icmp_cksum, icmpid,
4558					    nk->port[pd->sidx], 0);
4559					pd->hdr.icmp->icmp_id =
4560					    nk->port[pd->sidx];
4561				}
4562
4563				m_copyback(m, off, ICMP_MINLEN,
4564				    (caddr_t )pd->hdr.icmp);
4565				break;
4566#endif /* INET */
4567#ifdef INET6
4568			case AF_INET6:
4569				if (PF_ANEQ(pd->src,
4570				    &nk->addr[pd->sidx], AF_INET6))
4571					pf_change_a6(saddr,
4572					    &pd->hdr.icmp6->icmp6_cksum,
4573					    &nk->addr[pd->sidx], 0);
4574
4575				if (PF_ANEQ(pd->dst,
4576				    &nk->addr[pd->didx], AF_INET6))
4577					pf_change_a6(daddr,
4578					    &pd->hdr.icmp6->icmp6_cksum,
4579					    &nk->addr[pd->didx], 0);
4580
4581				m_copyback(m, off, sizeof(struct icmp6_hdr),
4582				    (caddr_t )pd->hdr.icmp6);
4583				break;
4584#endif /* INET6 */
4585			}
4586		}
4587		return (PF_PASS);
4588
4589	} else {
4590		/*
4591		 * ICMP error message in response to a TCP/UDP packet.
4592		 * Extract the inner TCP/UDP header and search for that state.
4593		 */
4594
4595		struct pf_pdesc	pd2;
4596		bzero(&pd2, sizeof pd2);
4597#ifdef INET
4598		struct ip	h2;
4599#endif /* INET */
4600#ifdef INET6
4601		struct ip6_hdr	h2_6;
4602		int		terminal = 0;
4603#endif /* INET6 */
4604		int		ipoff2 = 0;
4605		int		off2 = 0;
4606
4607		pd2.af = pd->af;
4608		/* Payload packet is from the opposite direction. */
4609		pd2.sidx = (direction == PF_IN) ? 1 : 0;
4610		pd2.didx = (direction == PF_IN) ? 0 : 1;
4611		switch (pd->af) {
4612#ifdef INET
4613		case AF_INET:
4614			/* offset of h2 in mbuf chain */
4615			ipoff2 = off + ICMP_MINLEN;
4616
4617			if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4618			    NULL, reason, pd2.af)) {
4619				DPFPRINTF(PF_DEBUG_MISC,
4620				    ("pf: ICMP error message too short "
4621				    "(ip)\n"));
4622				return (PF_DROP);
4623			}
4624			/*
4625			 * ICMP error messages don't refer to non-first
4626			 * fragments
4627			 */
4628			if (h2.ip_off & htons(IP_OFFMASK)) {
4629				REASON_SET(reason, PFRES_FRAG);
4630				return (PF_DROP);
4631			}
4632
4633			/* offset of protocol header that follows h2 */
4634			off2 = ipoff2 + (h2.ip_hl << 2);
4635
4636			pd2.proto = h2.ip_p;
4637			pd2.src = (struct pf_addr *)&h2.ip_src;
4638			pd2.dst = (struct pf_addr *)&h2.ip_dst;
4639			pd2.ip_sum = &h2.ip_sum;
4640			break;
4641#endif /* INET */
4642#ifdef INET6
4643		case AF_INET6:
4644			ipoff2 = off + sizeof(struct icmp6_hdr);
4645
4646			if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4647			    NULL, reason, pd2.af)) {
4648				DPFPRINTF(PF_DEBUG_MISC,
4649				    ("pf: ICMP error message too short "
4650				    "(ip6)\n"));
4651				return (PF_DROP);
4652			}
4653			pd2.proto = h2_6.ip6_nxt;
4654			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4655			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4656			pd2.ip_sum = NULL;
4657			off2 = ipoff2 + sizeof(h2_6);
4658			do {
4659				switch (pd2.proto) {
4660				case IPPROTO_FRAGMENT:
4661					/*
4662					 * ICMPv6 error messages for
4663					 * non-first fragments
4664					 */
4665					REASON_SET(reason, PFRES_FRAG);
4666					return (PF_DROP);
4667				case IPPROTO_AH:
4668				case IPPROTO_HOPOPTS:
4669				case IPPROTO_ROUTING:
4670				case IPPROTO_DSTOPTS: {
4671					/* get next header and header length */
4672					struct ip6_ext opt6;
4673
4674					if (!pf_pull_hdr(m, off2, &opt6,
4675					    sizeof(opt6), NULL, reason,
4676					    pd2.af)) {
4677						DPFPRINTF(PF_DEBUG_MISC,
4678						    ("pf: ICMPv6 short opt\n"));
4679						return (PF_DROP);
4680					}
4681					if (pd2.proto == IPPROTO_AH)
4682						off2 += (opt6.ip6e_len + 2) * 4;
4683					else
4684						off2 += (opt6.ip6e_len + 1) * 8;
4685					pd2.proto = opt6.ip6e_nxt;
4686					/* go to the next header */
4687					break;
4688				}
4689				default:
4690					terminal++;
4691					break;
4692				}
4693			} while (!terminal);
4694			break;
4695#endif /* INET6 */
4696		}
4697
4698		switch (pd2.proto) {
4699		case IPPROTO_TCP: {
4700			struct tcphdr		 th;
4701			u_int32_t		 seq;
4702			struct pf_state_peer	*src, *dst;
4703			u_int8_t		 dws;
4704			int			 copyback = 0;
4705
4706			/*
4707			 * Only the first 8 bytes of the TCP header can be
4708			 * expected. Don't access any TCP header fields after
4709			 * th_seq; an ackskew test is not possible.
4710			 */
4711			if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4712			    pd2.af)) {
4713				DPFPRINTF(PF_DEBUG_MISC,
4714				    ("pf: ICMP error message too short "
4715				    "(tcp)\n"));
4716				return (PF_DROP);
4717			}
4718
4719			key.af = pd2.af;
4720			key.proto = IPPROTO_TCP;
4721			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4722			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4723			key.port[pd2.sidx] = th.th_sport;
4724			key.port[pd2.didx] = th.th_dport;
4725
4726			STATE_LOOKUP(kif, &key, direction, *state, pd);
4727
4728			if (direction == (*state)->direction) {
4729				src = &(*state)->dst;
4730				dst = &(*state)->src;
4731			} else {
4732				src = &(*state)->src;
4733				dst = &(*state)->dst;
4734			}
4735
4736			if (src->wscale && dst->wscale)
4737				dws = dst->wscale & PF_WSCALE_MASK;
4738			else
4739				dws = 0;
4740
4741			/* Demodulate sequence number */
4742			seq = ntohl(th.th_seq) - src->seqdiff;
4743			if (src->seqdiff) {
4744				pf_change_a(&th.th_seq, icmpsum,
4745				    htonl(seq), 0);
4746				copyback = 1;
4747			}
4748
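			/*
			 * Unless the state is sloppy, the sequence number
			 * quoted in the ICMP error must fall inside the
			 * window tracked for this connection; this defeats
			 * blindly spoofed ICMP errors meant to tear down or
			 * throttle a state.
			 */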
4749			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4750			    (!SEQ_GEQ(src->seqhi, seq) ||
4751			    !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4752				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4753					printf("pf: BAD ICMP %d:%d ",
4754					    icmptype, pd->hdr.icmp->icmp_code);
4755					pf_print_host(pd->src, 0, pd->af);
4756					printf(" -> ");
4757					pf_print_host(pd->dst, 0, pd->af);
4758					printf(" state: ");
4759					pf_print_state(*state);
4760					printf(" seq=%u\n", seq);
4761				}
4762				REASON_SET(reason, PFRES_BADSTATE);
4763				return (PF_DROP);
4764			} else {
4765				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4766					printf("pf: OK ICMP %d:%d ",
4767					    icmptype, pd->hdr.icmp->icmp_code);
4768					pf_print_host(pd->src, 0, pd->af);
4769					printf(" -> ");
4770					pf_print_host(pd->dst, 0, pd->af);
4771					printf(" state: ");
4772					pf_print_state(*state);
4773					printf(" seq=%u\n", seq);
4774				}
4775			}
4776
4777			/* translate source/destination address, if necessary */
4778			if ((*state)->key[PF_SK_WIRE] !=
4779			    (*state)->key[PF_SK_STACK]) {
4780				struct pf_state_key *nk =
4781				    (*state)->key[pd->didx];
4782
4783				if (PF_ANEQ(pd2.src,
4784				    &nk->addr[pd2.sidx], pd2.af) ||
4785				    nk->port[pd2.sidx] != th.th_sport)
4786					pf_change_icmp(pd2.src, &th.th_sport,
4787					    daddr, &nk->addr[pd2.sidx],
4788					    nk->port[pd2.sidx], NULL,
4789					    pd2.ip_sum, icmpsum,
4790					    pd->ip_sum, 0, pd2.af);
4791
4792				if (PF_ANEQ(pd2.dst,
4793				    &nk->addr[pd2.didx], pd2.af) ||
4794				    nk->port[pd2.didx] != th.th_dport)
4795					pf_change_icmp(pd2.dst, &th.th_dport,
4796					    NULL, /* XXX Inbound NAT? */
4797					    &nk->addr[pd2.didx],
4798					    nk->port[pd2.didx], NULL,
4799					    pd2.ip_sum, icmpsum,
4800					    pd->ip_sum, 0, pd2.af);
4801				copyback = 1;
4802			}
4803
4804			if (copyback) {
4805				switch (pd2.af) {
4806#ifdef INET
4807				case AF_INET:
4808					m_copyback(m, off, ICMP_MINLEN,
4809					    (caddr_t )pd->hdr.icmp);
4810					m_copyback(m, ipoff2, sizeof(h2),
4811					    (caddr_t )&h2);
4812					break;
4813#endif /* INET */
4814#ifdef INET6
4815				case AF_INET6:
4816					m_copyback(m, off,
4817					    sizeof(struct icmp6_hdr),
4818					    (caddr_t )pd->hdr.icmp6);
4819					m_copyback(m, ipoff2, sizeof(h2_6),
4820					    (caddr_t )&h2_6);
4821					break;
4822#endif /* INET6 */
4823				}
4824				m_copyback(m, off2, 8, (caddr_t)&th);
4825			}
4826
4827			return (PF_PASS);
4828			break;
4829		}
4830		case IPPROTO_UDP: {
4831			struct udphdr		uh;
4832
4833			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4834			    NULL, reason, pd2.af)) {
4835				DPFPRINTF(PF_DEBUG_MISC,
4836				    ("pf: ICMP error message too short "
4837				    "(udp)\n"));
4838				return (PF_DROP);
4839			}
4840
4841			key.af = pd2.af;
4842			key.proto = IPPROTO_UDP;
4843			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4844			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4845			key.port[pd2.sidx] = uh.uh_sport;
4846			key.port[pd2.didx] = uh.uh_dport;
4847
4848			STATE_LOOKUP(kif, &key, direction, *state, pd);
4849
4850			/* translate source/destination address, if necessary */
4851			if ((*state)->key[PF_SK_WIRE] !=
4852			    (*state)->key[PF_SK_STACK]) {
4853				struct pf_state_key *nk =
4854				    (*state)->key[pd->didx];
4855
4856				if (PF_ANEQ(pd2.src,
4857				    &nk->addr[pd2.sidx], pd2.af) ||
4858				    nk->port[pd2.sidx] != uh.uh_sport)
4859					pf_change_icmp(pd2.src, &uh.uh_sport,
4860					    daddr, &nk->addr[pd2.sidx],
4861					    nk->port[pd2.sidx], &uh.uh_sum,
4862					    pd2.ip_sum, icmpsum,
4863					    pd->ip_sum, 1, pd2.af);
4864
4865				if (PF_ANEQ(pd2.dst,
4866				    &nk->addr[pd2.didx], pd2.af) ||
4867				    nk->port[pd2.didx] != uh.uh_dport)
4868					pf_change_icmp(pd2.dst, &uh.uh_dport,
4869					    NULL, /* XXX Inbound NAT? */
4870					    &nk->addr[pd2.didx],
4871					    nk->port[pd2.didx], &uh.uh_sum,
4872					    pd2.ip_sum, icmpsum,
4873					    pd->ip_sum, 1, pd2.af);
4874
4875				switch (pd2.af) {
4876#ifdef INET
4877				case AF_INET:
4878					m_copyback(m, off, ICMP_MINLEN,
4879					    (caddr_t )pd->hdr.icmp);
4880					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4881					break;
4882#endif /* INET */
4883#ifdef INET6
4884				case AF_INET6:
4885					m_copyback(m, off,
4886					    sizeof(struct icmp6_hdr),
4887					    (caddr_t )pd->hdr.icmp6);
4888					m_copyback(m, ipoff2, sizeof(h2_6),
4889					    (caddr_t )&h2_6);
4890					break;
4891#endif /* INET6 */
4892				}
4893				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4894			}
4895			return (PF_PASS);
4896			break;
4897		}
4898#ifdef INET
4899		case IPPROTO_ICMP: {
4900			struct icmp		iih;
4901
4902			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4903			    NULL, reason, pd2.af)) {
4904				DPFPRINTF(PF_DEBUG_MISC,
4905				    ("pf: ICMP error message too short "
4906				    "(icmp)\n"));
4907				return (PF_DROP);
4908			}
4909
4910			key.af = pd2.af;
4911			key.proto = IPPROTO_ICMP;
4912			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4913			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4914			key.port[0] = key.port[1] = iih.icmp_id;
4915
4916			STATE_LOOKUP(kif, &key, direction, *state, pd);
4917
4918			/* translate source/destination address, if necessary */
4919			if ((*state)->key[PF_SK_WIRE] !=
4920			    (*state)->key[PF_SK_STACK]) {
4921				struct pf_state_key *nk =
4922				    (*state)->key[pd->didx];
4923
4924				if (PF_ANEQ(pd2.src,
4925				    &nk->addr[pd2.sidx], pd2.af) ||
4926				    nk->port[pd2.sidx] != iih.icmp_id)
4927					pf_change_icmp(pd2.src, &iih.icmp_id,
4928					    daddr, &nk->addr[pd2.sidx],
4929					    nk->port[pd2.sidx], NULL,
4930					    pd2.ip_sum, icmpsum,
4931					    pd->ip_sum, 0, AF_INET);
4932
4933				if (PF_ANEQ(pd2.dst,
4934				    &nk->addr[pd2.didx], pd2.af) ||
4935				    nk->port[pd2.didx] != iih.icmp_id)
4936					pf_change_icmp(pd2.dst, &iih.icmp_id,
4937					    NULL, /* XXX Inbound NAT? */
4938					    &nk->addr[pd2.didx],
4939					    nk->port[pd2.didx], NULL,
4940					    pd2.ip_sum, icmpsum,
4941					    pd->ip_sum, 0, AF_INET);
4942
4943				m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4944				m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4945				m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4946			}
4947			return (PF_PASS);
4948			break;
4949		}
4950#endif /* INET */
4951#ifdef INET6
4952		case IPPROTO_ICMPV6: {
4953			struct icmp6_hdr	iih;
4954
4955			if (!pf_pull_hdr(m, off2, &iih,
4956			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4957				DPFPRINTF(PF_DEBUG_MISC,
4958				    ("pf: ICMP error message too short "
4959				    "(icmp6)\n"));
4960				return (PF_DROP);
4961			}
4962
4963			key.af = pd2.af;
4964			key.proto = IPPROTO_ICMPV6;
4965			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4966			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4967			key.port[0] = key.port[1] = iih.icmp6_id;
4968
4969			STATE_LOOKUP(kif, &key, direction, *state, pd);
4970
4971			/* translate source/destination address, if necessary */
4972			if ((*state)->key[PF_SK_WIRE] !=
4973			    (*state)->key[PF_SK_STACK]) {
4974				struct pf_state_key *nk =
4975				    (*state)->key[pd->didx];
4976
4977				if (PF_ANEQ(pd2.src,
4978				    &nk->addr[pd2.sidx], pd2.af) ||
4979				    nk->port[pd2.sidx] != iih.icmp6_id)
4980					pf_change_icmp(pd2.src, &iih.icmp6_id,
4981					    daddr, &nk->addr[pd2.sidx],
4982					    nk->port[pd2.sidx], NULL,
4983					    pd2.ip_sum, icmpsum,
4984					    pd->ip_sum, 0, AF_INET6);
4985
4986				if (PF_ANEQ(pd2.dst,
4987				    &nk->addr[pd2.didx], pd2.af) ||
4988				    nk->port[pd2.didx] != iih.icmp6_id)
4989					pf_change_icmp(pd2.dst, &iih.icmp6_id,
4990					    NULL, /* XXX Inbound NAT? */
4991					    &nk->addr[pd2.didx],
4992					    nk->port[pd2.didx], NULL,
4993					    pd2.ip_sum, icmpsum,
4994					    pd->ip_sum, 0, AF_INET6);
4995
4996				m_copyback(m, off, sizeof(struct icmp6_hdr),
4997				    (caddr_t)pd->hdr.icmp6);
4998				m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4999				m_copyback(m, off2, sizeof(struct icmp6_hdr),
5000				    (caddr_t)&iih);
5001			}
5002			return (PF_PASS);
5003			break;
5004		}
5005#endif /* INET6 */
5006		default: {
5007			key.af = pd2.af;
5008			key.proto = pd2.proto;
5009			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5010			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5011			key.port[0] = key.port[1] = 0;
5012
5013			STATE_LOOKUP(kif, &key, direction, *state, pd);
5014
5015			/* translate source/destination address, if necessary */
5016			if ((*state)->key[PF_SK_WIRE] !=
5017			    (*state)->key[PF_SK_STACK]) {
5018				struct pf_state_key *nk =
5019				    (*state)->key[pd->didx];
5020
5021				if (PF_ANEQ(pd2.src,
5022				    &nk->addr[pd2.sidx], pd2.af))
5023					pf_change_icmp(pd2.src, NULL, daddr,
5024					    &nk->addr[pd2.sidx], 0, NULL,
5025					    pd2.ip_sum, icmpsum,
5026					    pd->ip_sum, 0, pd2.af);
5027
5028				if (PF_ANEQ(pd2.dst,
5029				    &nk->addr[pd2.didx], pd2.af))
5030					pf_change_icmp(pd2.dst, NULL,
5031					    NULL, /* XXX Inbound NAT? */
5032					    &nk->addr[pd2.didx], 0, NULL,
5033					    pd2.ip_sum, icmpsum,
5034					    pd->ip_sum, 0, pd2.af);
5035
5036				switch (pd2.af) {
5037#ifdef INET
5038				case AF_INET:
5039					m_copyback(m, off, ICMP_MINLEN,
5040					    (caddr_t)pd->hdr.icmp);
5041					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5042					break;
5043#endif /* INET */
5044#ifdef INET6
5045				case AF_INET6:
5046					m_copyback(m, off,
5047					    sizeof(struct icmp6_hdr),
5048					    (caddr_t )pd->hdr.icmp6);
5049					m_copyback(m, ipoff2, sizeof(h2_6),
5050					    (caddr_t )&h2_6);
5051					break;
5052#endif /* INET6 */
5053				}
5054			}
5055			return (PF_PASS);
5056			break;
5057		}
5058		}
5059	}
5060}
5061
5062static int
5063pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5064    struct mbuf *m, struct pf_pdesc *pd)
5065{
5066	struct pf_state_peer	*src, *dst;
5067	struct pf_state_key_cmp	 key;
5068
5069	bzero(&key, sizeof(key));
5070	key.af = pd->af;
5071	key.proto = pd->proto;
5072	if (direction == PF_IN)	{
5073		PF_ACPY(&key.addr[0], pd->src, key.af);
5074		PF_ACPY(&key.addr[1], pd->dst, key.af);
5075		key.port[0] = key.port[1] = 0;
5076	} else {
5077		PF_ACPY(&key.addr[1], pd->src, key.af);
5078		PF_ACPY(&key.addr[0], pd->dst, key.af);
5079		key.port[1] = key.port[0] = 0;
5080	}
5081
5082	STATE_LOOKUP(kif, &key, direction, *state, pd);
5083
5084	if (direction == (*state)->direction) {
5085		src = &(*state)->src;
5086		dst = &(*state)->dst;
5087	} else {
5088		src = &(*state)->dst;
5089		dst = &(*state)->src;
5090	}
5091
5092	/* update states */
5093	if (src->state < PFOTHERS_SINGLE)
5094		src->state = PFOTHERS_SINGLE;
5095	if (dst->state == PFOTHERS_SINGLE)
5096		dst->state = PFOTHERS_MULTIPLE;
5097
5098	/* update expire time */
5099	(*state)->expire = time_uptime;
5100	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5101		(*state)->timeout = PFTM_OTHER_MULTIPLE;
5102	else
5103		(*state)->timeout = PFTM_OTHER_SINGLE;
5104
5105	/* translate source/destination address, if necessary */
5106	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5107		struct pf_state_key *nk = (*state)->key[pd->didx];
5108
5109		KASSERT(nk, ("%s: nk is null", __func__));
5110		KASSERT(pd, ("%s: pd is null", __func__));
5111		KASSERT(pd->src, ("%s: pd->src is null", __func__));
5112		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5113		switch (pd->af) {
5114#ifdef INET
5115		case AF_INET:
5116			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5117				pf_change_a(&pd->src->v4.s_addr,
5118				    pd->ip_sum,
5119				    nk->addr[pd->sidx].v4.s_addr,
5120				    0);
5121
5122
5123			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5124				pf_change_a(&pd->dst->v4.s_addr,
5125				    pd->ip_sum,
5126				    nk->addr[pd->didx].v4.s_addr,
5127				    0);
5128
5129			break;
5130#endif /* INET */
5131#ifdef INET6
5132		case AF_INET6:
5133			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5134				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5135
5136			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5137				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5138#endif /* INET6 */
5139		}
5140	}
5141	return (PF_PASS);
5142}
5143
5144/*
5145 * ipoff and off are measured from the start of the mbuf chain.
5146 * h must be at "ipoff" on the mbuf chain.
5147 */
5148void *
5149pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5150    u_short *actionp, u_short *reasonp, sa_family_t af)
5151{
5152	switch (af) {
5153#ifdef INET
5154	case AF_INET: {
5155		struct ip	*h = mtod(m, struct ip *);
5156		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5157
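		/*
		 * Non-first fragment: the header being pulled is not in
		 * this fragment at all.  Let the fragment pass when it
		 * starts beyond the region being pulled, otherwise drop it.
		 */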
5158		if (fragoff) {
5159			if (fragoff >= len)
5160				ACTION_SET(actionp, PF_PASS);
5161			else {
5162				ACTION_SET(actionp, PF_DROP);
5163				REASON_SET(reasonp, PFRES_FRAG);
5164			}
5165			return (NULL);
5166		}
5167		if (m->m_pkthdr.len < off + len ||
5168		    ntohs(h->ip_len) < off + len) {
5169			ACTION_SET(actionp, PF_DROP);
5170			REASON_SET(reasonp, PFRES_SHORT);
5171			return (NULL);
5172		}
5173		break;
5174	}
5175#endif /* INET */
5176#ifdef INET6
5177	case AF_INET6: {
5178		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
5179
5180		if (m->m_pkthdr.len < off + len ||
5181		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5182		    (unsigned)(off + len)) {
5183			ACTION_SET(actionp, PF_DROP);
5184			REASON_SET(reasonp, PFRES_SHORT);
5185			return (NULL);
5186		}
5187		break;
5188	}
5189#endif /* INET6 */
5190	}
5191	m_copydata(m, off, len, p);
5192	return (p);
5193}
5194
5195int
5196pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5197    int rtableid)
5198{
5199#ifdef RADIX_MPATH
5200	struct radix_node_head	*rnh;
5201#endif
5202	struct sockaddr_in	*dst;
5203	int			 ret = 1;
5204	int			 check_mpath;
5205#ifdef INET6
5206	struct sockaddr_in6	*dst6;
5207	struct route_in6	 ro;
5208#else
5209	struct route		 ro;
5210#endif
5211	struct radix_node	*rn;
5212	struct rtentry		*rt;
5213	struct ifnet		*ifp;
5214
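	/*
	 * Called with kif == NULL this is a pure "does any route exist"
	 * check; with a kif it is a uRPF test: some route toward the
	 * address must point back out the interface the packet arrived
	 * on (all multipath routes are tried when RADIX_MPATH is set).
	 */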
5215	check_mpath = 0;
5216#ifdef RADIX_MPATH
5217	/* XXX: stick to table 0 for now */
5218	rnh = rt_tables_get_rnh(0, af);
5219	if (rnh != NULL && rn_mpath_capable(rnh))
5220		check_mpath = 1;
5221#endif
5222	bzero(&ro, sizeof(ro));
5223	switch (af) {
5224	case AF_INET:
5225		dst = satosin(&ro.ro_dst);
5226		dst->sin_family = AF_INET;
5227		dst->sin_len = sizeof(*dst);
5228		dst->sin_addr = addr->v4;
5229		break;
5230#ifdef INET6
5231	case AF_INET6:
5232		/*
5233		 * Skip check for addresses with embedded interface scope,
5234		 * as they would always match anyway.
5235		 */
5236		if (IN6_IS_SCOPE_EMBED(&addr->v6))
5237			goto out;
5238		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5239		dst6->sin6_family = AF_INET6;
5240		dst6->sin6_len = sizeof(*dst6);
5241		dst6->sin6_addr = addr->v6;
5242		break;
5243#endif /* INET6 */
5244	default:
5245		return (0);
5246	}
5247
5248	/* Skip checks for ipsec interfaces */
5249	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5250		goto out;
5251
5252	switch (af) {
5253#ifdef INET6
5254	case AF_INET6:
5255		in6_rtalloc_ign(&ro, 0, rtableid);
5256		break;
5257#endif
5258#ifdef INET
5259	case AF_INET:
5260		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5261		break;
5262#endif
5263	default:
5264		rtalloc_ign((struct route *)&ro, 0);	/* No/default FIB. */
5265		break;
5266	}
5267
5268	if (ro.ro_rt != NULL) {
5269		/* No interface given, this is a no-route check */
5270		if (kif == NULL)
5271			goto out;
5272
5273		if (kif->pfik_ifp == NULL) {
5274			ret = 0;
5275			goto out;
5276		}
5277
5278		/* Perform uRPF check if an input interface was passed */
5279		ret = 0;
5280		rn = (struct radix_node *)ro.ro_rt;
5281		do {
5282			rt = (struct rtentry *)rn;
5283			ifp = rt->rt_ifp;
5284
5285			if (kif->pfik_ifp == ifp)
5286				ret = 1;
5287#ifdef RADIX_MPATH
5288			rn = rn_mpath_next(rn);
5289#endif
5290		} while (check_mpath == 1 && rn != NULL && ret == 0);
5291	} else
5292		ret = 0;
5293out:
5294	if (ro.ro_rt != NULL)
5295		RTFREE(ro.ro_rt);
5296	return (ret);
5297}
5298
5299#ifdef INET
5300static void
5301pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5302    struct pf_state *s, struct pf_pdesc *pd)
5303{
5304	struct mbuf		*m0, *m1;
5305	struct sockaddr_in	dst;
5306	struct ip		*ip;
5307	struct ifnet		*ifp = NULL;
5308	struct pf_addr		 naddr;
5309	struct pf_src_node	*sn = NULL;
5310	int			 error = 0;
5311	uint16_t		 ip_len, ip_off;
5312
5313	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5314	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5315	    __func__));
5316
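	/*
	 * Loop protection: refuse to route-to a packet that pf has
	 * already routed several times (pf_mtag->routed counts passes).
	 */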
5317	if ((pd->pf_mtag == NULL &&
5318	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5319	    pd->pf_mtag->routed++ > 3) {
5320		m0 = *m;
5321		*m = NULL;
5322		goto bad_locked;
5323	}
5324
5325	if (r->rt == PF_DUPTO) {
5326		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5327			if (s)
5328				PF_STATE_UNLOCK(s);
5329			return;
5330		}
5331	} else {
5332		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5333			if (s)
5334				PF_STATE_UNLOCK(s);
5335			return;
5336		}
5337		m0 = *m;
5338	}
5339
5340	ip = mtod(m0, struct ip *);
5341
5342	bzero(&dst, sizeof(dst));
5343	dst.sin_family = AF_INET;
5344	dst.sin_len = sizeof(dst);
5345	dst.sin_addr = ip->ip_dst;
5346
5347	if (r->rt == PF_FASTROUTE) {
5348		struct rtentry *rt;
5349
5350		if (s)
5351			PF_STATE_UNLOCK(s);
5352		rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5353		if (rt == NULL) {
5354			KMOD_IPSTAT_INC(ips_noroute);
5355			error = EHOSTUNREACH;
5356			goto bad;
5357		}
5358
5359		ifp = rt->rt_ifp;
5360		counter_u64_add(rt->rt_pksent, 1);
5361
5362		if (rt->rt_flags & RTF_GATEWAY)
5363			bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5364		RTFREE_LOCKED(rt);
5365	} else {
5366		if (TAILQ_EMPTY(&r->rpool.list)) {
5367			DPFPRINTF(PF_DEBUG_URGENT,
5368			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5369			goto bad_locked;
5370		}
5371		if (s == NULL) {
5372			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5373			    &naddr, NULL, &sn);
5374			if (!PF_AZERO(&naddr, AF_INET))
5375				dst.sin_addr.s_addr = naddr.v4.s_addr;
5376			ifp = r->rpool.cur->kif ?
5377			    r->rpool.cur->kif->pfik_ifp : NULL;
5378		} else {
5379			if (!PF_AZERO(&s->rt_addr, AF_INET))
5380				dst.sin_addr.s_addr =
5381				    s->rt_addr.v4.s_addr;
5382			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5383			PF_STATE_UNLOCK(s);
5384		}
5385	}
5386	if (ifp == NULL)
5387		goto bad;
5388
5389	if (oifp != ifp) {
5390		if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5391			goto bad;
5392		else if (m0 == NULL)
5393			goto done;
5394		if (m0->m_len < sizeof(struct ip)) {
5395			DPFPRINTF(PF_DEBUG_URGENT,
5396			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5397			goto bad;
5398		}
5399		ip = mtod(m0, struct ip *);
5400	}
5401
5402	if (ifp->if_flags & IFF_LOOPBACK)
5403		m0->m_flags |= M_SKIP_FIREWALL;
5404
5405	ip_len = ntohs(ip->ip_len);
5406	ip_off = ntohs(ip->ip_off);
5407
5408	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
5409	m0->m_pkthdr.csum_flags |= CSUM_IP;
5410	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5411		in_delayed_cksum(m0);
5412		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5413	}
5414#ifdef SCTP
5415	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5416		sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5417		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5418	}
5419#endif
5420
5421	/*
5422	 * If small enough for interface, or the interface will take
5423	 * care of the fragmentation for us, we can just send directly.
5424	 */
5425	if (ip_len <= ifp->if_mtu ||
5426	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5427	    ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5428		ip->ip_sum = 0;
5429		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5430			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5431			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5432		}
5433		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
5434		error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5435		goto done;
5436	}
5437
5438	/* Balk when DF bit is set or the interface doesn't support TSO. */
5439	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5440		error = EMSGSIZE;
5441		KMOD_IPSTAT_INC(ips_cantfrag);
5442		if (r->rt != PF_DUPTO) {
5443			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5444			    ifp->if_mtu);
5445			goto done;
5446		} else
5447			goto bad;
5448	}
5449
5450	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5451	if (error)
5452		goto bad;
5453
5454	for (; m0; m0 = m1) {
5455		m1 = m0->m_nextpkt;
5456		m0->m_nextpkt = NULL;
5457		if (error == 0) {
5458			m_clrprotoflags(m0);
5459			error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5460		} else
5461			m_freem(m0);
5462	}
5463
5464	if (error == 0)
5465		KMOD_IPSTAT_INC(ips_fragmented);
5466
5467done:
5468	if (r->rt != PF_DUPTO)
5469		*m = NULL;
5470	return;
5471
5472bad_locked:
5473	if (s)
5474		PF_STATE_UNLOCK(s);
5475bad:
5476	m_freem(m0);
5477	goto done;
5478}
5479#endif /* INET */
5480
5481#ifdef INET6
5482static void
5483pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5484    struct pf_state *s, struct pf_pdesc *pd)
5485{
5486	struct mbuf		*m0;
5487	struct sockaddr_in6	dst;
5488	struct ip6_hdr		*ip6;
5489	struct ifnet		*ifp = NULL;
5490	struct pf_addr		 naddr;
5491	struct pf_src_node	*sn = NULL;
5492
5493	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5494	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5495	    __func__));
5496
5497	if ((pd->pf_mtag == NULL &&
5498	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5499	    pd->pf_mtag->routed++ > 3) {
5500		m0 = *m;
5501		*m = NULL;
5502		goto bad_locked;
5503	}
5504
5505	if (r->rt == PF_DUPTO) {
5506		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5507			if (s)
5508				PF_STATE_UNLOCK(s);
5509			return;
5510		}
5511	} else {
5512		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5513			if (s)
5514				PF_STATE_UNLOCK(s);
5515			return;
5516		}
5517		m0 = *m;
5518	}
5519
5520	ip6 = mtod(m0, struct ip6_hdr *);
5521
5522	bzero(&dst, sizeof(dst));
5523	dst.sin6_family = AF_INET6;
5524	dst.sin6_len = sizeof(dst);
5525	dst.sin6_addr = ip6->ip6_dst;
5526
5527	/* Cheat. XXX why only in the v6 case??? */
5528	if (r->rt == PF_FASTROUTE) {
5529		if (s)
5530			PF_STATE_UNLOCK(s);
5531		m0->m_flags |= M_SKIP_FIREWALL;
5532		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5533		*m = NULL;
5534		return;
5535	}
5536
5537	if (TAILQ_EMPTY(&r->rpool.list)) {
5538		DPFPRINTF(PF_DEBUG_URGENT,
5539		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5540		goto bad_locked;
5541	}
5542	if (s == NULL) {
5543		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5544		    &naddr, NULL, &sn);
5545		if (!PF_AZERO(&naddr, AF_INET6))
5546			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5547			    &naddr, AF_INET6);
5548		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5549	} else {
5550		if (!PF_AZERO(&s->rt_addr, AF_INET6))
5551			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5552			    &s->rt_addr, AF_INET6);
5553		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5554	}
5555
5556	if (s)
5557		PF_STATE_UNLOCK(s);
5558
5559	if (ifp == NULL)
5560		goto bad;
5561
5562	if (oifp != ifp) {
5563		if (pf_test6(PF_FWD, ifp, &m0, NULL) != PF_PASS)
5564			goto bad;
5565		else if (m0 == NULL)
5566			goto done;
5567		if (m0->m_len < sizeof(struct ip6_hdr)) {
5568			DPFPRINTF(PF_DEBUG_URGENT,
5569			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5570			    __func__));
5571			goto bad;
5572		}
5573		ip6 = mtod(m0, struct ip6_hdr *);
5574	}
5575
5576	if (ifp->if_flags & IFF_LOOPBACK)
5577		m0->m_flags |= M_SKIP_FIREWALL;
5578
5579	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5580	    ~ifp->if_hwassist) {
5581		uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5582		in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5583		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5584	}
5585
5586	/*
5587	 * If the packet is too large for the outgoing interface,
5588	 * send back an icmp6 error.
5589	 */
5590	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5591		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5592	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5593		nd6_output(ifp, ifp, m0, &dst, NULL);
5594	else {
5595		in6_ifstat_inc(ifp, ifs6_in_toobig);
5596		if (r->rt != PF_DUPTO)
5597			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5598		else
5599			goto bad;
5600	}
5601
5602done:
5603	if (r->rt != PF_DUPTO)
5604		*m = NULL;
5605	return;
5606
5607bad_locked:
5608	if (s)
5609		PF_STATE_UNLOCK(s);
5610bad:
5611	m_freem(m0);
5612	goto done;
5613}
5614#endif /* INET6 */
5615
5616/*
5617 * FreeBSD supports cksum offloads for the following drivers:
5618 *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5619 *   ti(4), txp(4), xl(4)
5620 *
5621 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5622 *  network driver performed cksum including the pseudo header; only
5623 *   csum_data needs to be verified
5624 * CSUM_DATA_VALID :
5625 *  network driver performed cksum, but additional pseudo header cksum
5626 *  computation with the partial csum_data is needed (i.e. no H/W support
5627 *  for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
5628 *
5629 * After validating the cksum of the packet, set both flags CSUM_DATA_VALID
5630 * and CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in the
5631 * upper TCP/UDP layer.
5632 * Also, set csum_data to 0xffff to force cksum validation.
5633 */
5634static int
5635pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5636{
5637	u_int16_t sum = 0;
5638	int hw_assist = 0;
5639	struct ip *ip;
5640
5641	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5642		return (1);
5643	if (m->m_pkthdr.len < off + len)
5644		return (1);
5645
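	/*
	 * Prefer the driver's verdict when one is available:
	 * CSUM_DATA_VALID alone means csum_data still lacks the pseudo
	 * header sum, so fold it in with in_pseudo() before testing;
	 * with CSUM_PSEUDO_HDR the value can be tested directly.
	 */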
5646	switch (p) {
5647	case IPPROTO_TCP:
5648		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5649			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5650				sum = m->m_pkthdr.csum_data;
5651			} else {
5652				ip = mtod(m, struct ip *);
5653				sum = in_pseudo(ip->ip_src.s_addr,
5654				ip->ip_dst.s_addr, htonl((u_short)len +
5655				m->m_pkthdr.csum_data + IPPROTO_TCP));
5656			}
5657			sum ^= 0xffff;
5658			++hw_assist;
5659		}
5660		break;
5661	case IPPROTO_UDP:
5662		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5663			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5664				sum = m->m_pkthdr.csum_data;
5665			} else {
5666				ip = mtod(m, struct ip *);
5667				sum = in_pseudo(ip->ip_src.s_addr,
5668				    ip->ip_dst.s_addr, htonl((u_short)len +
5669				    m->m_pkthdr.csum_data + IPPROTO_UDP));
5670			}
5671			sum ^= 0xffff;
5672			++hw_assist;
5673		}
5674		break;
5675	case IPPROTO_ICMP:
5676#ifdef INET6
5677	case IPPROTO_ICMPV6:
5678#endif /* INET6 */
5679		break;
5680	default:
5681		return (1);
5682	}
5683
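	/* No usable hardware assist; verify the cksum in software. */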
5684	if (!hw_assist) {
5685		switch (af) {
5686		case AF_INET:
5687			if (p == IPPROTO_ICMP) {
5688				if (m->m_len < off)
5689					return (1);
5690				m->m_data += off;
5691				m->m_len -= off;
5692				sum = in_cksum(m, len);
5693				m->m_data -= off;
5694				m->m_len += off;
5695			} else {
5696				if (m->m_len < sizeof(struct ip))
5697					return (1);
5698				sum = in4_cksum(m, p, off, len);
5699			}
5700			break;
5701#ifdef INET6
5702		case AF_INET6:
5703			if (m->m_len < sizeof(struct ip6_hdr))
5704				return (1);
5705			sum = in6_cksum(m, p, off, len);
5706			break;
5707#endif /* INET6 */
5708		default:
5709			return (1);
5710		}
5711	}
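	/* A non-zero folded sum means the cksum is bad. */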
5712	if (sum) {
5713		switch (p) {
5714		case IPPROTO_TCP:
5715		    {
5716			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5717			break;
5718		    }
5719		case IPPROTO_UDP:
5720		    {
5721			KMOD_UDPSTAT_INC(udps_badsum);
5722			break;
5723		    }
5724#ifdef INET
5725		case IPPROTO_ICMP:
5726		    {
5727			KMOD_ICMPSTAT_INC(icps_checksum);
5728			break;
5729		    }
5730#endif
5731#ifdef INET6
5732		case IPPROTO_ICMPV6:
5733		    {
5734			KMOD_ICMP6STAT_INC(icp6s_checksum);
5735			break;
5736		    }
5737#endif /* INET6 */
5738		}
5739		return (1);
5740	} else {
5741		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5742			m->m_pkthdr.csum_flags |=
5743			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5744			m->m_pkthdr.csum_data = 0xffff;
5745		}
5746	}
5747	return (0);
5748}
5749
5750
5751#ifdef INET
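/*
 * Main IPv4 test routine, called from the pfil(9) hooks for each
 * inbound (PF_IN) and outbound (PF_OUT) packet.  It may modify,
 * replace or consume the mbuf that *m0 points to and returns a PF_*
 * verdict.
 */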
5752int
5753pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5754{
5755	struct pfi_kif		*kif;
5756	u_short			 action, reason = 0, log = 0;
5757	struct mbuf		*m = *m0;
5758	struct ip		*h = NULL;
5759	struct m_tag		*ipfwtag;
5760	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5761	struct pf_state		*s = NULL;
5762	struct pf_ruleset	*ruleset = NULL;
5763	struct pf_pdesc		 pd;
5764	int			 off, dirndx, pqid = 0;
5765
5766	M_ASSERTPKTHDR(m);
5767
5768	if (!V_pf_status.running)
5769		return (PF_PASS);
5770
5771	memset(&pd, 0, sizeof(pd));
5772
5773	kif = (struct pfi_kif *)ifp->if_pf_kif;
5774
5775	if (kif == NULL) {
5776		DPFPRINTF(PF_DEBUG_URGENT,
5777		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5778		return (PF_DROP);
5779	}
5780	if (kif->pfik_flags & PFI_IFLAG_SKIP)
5781		return (PF_PASS);
5782
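	/* Honor M_SKIP_FIREWALL, set e.g. on packets pf generated itself. */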
5783	if (m->m_flags & M_SKIP_FIREWALL)
5784		return (PF_PASS);
5785
5786	pd.pf_mtag = pf_find_mtag(m);
5787
5788	PF_RULES_RLOCK();
5789
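	/*
	 * If divert(4) re-injected this packet, strip the ipfw tag and
	 * remember that the packet already looped through pf once.
	 */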
5790	if (ip_divert_ptr != NULL &&
5791	    ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5792		struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5793		if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5794			if (pd.pf_mtag == NULL &&
5795			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5796				action = PF_DROP;
5797				goto done;
5798			}
5799			pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5800			m_tag_delete(m, ipfwtag);
5801		}
5802		if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5803			m->m_flags |= M_FASTFWD_OURS;
5804			pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5805		}
5806	} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5807		/* We do IP header normalization and packet reassembly here */
5808		action = PF_DROP;
5809		goto done;
5810	}
5811	m = *m0;	/* pf_normalize messes with m0 */
5812	h = mtod(m, struct ip *);
5813
5814	off = h->ip_hl << 2;
5815	if (off < (int)sizeof(struct ip)) {
5816		action = PF_DROP;
5817		REASON_SET(&reason, PFRES_SHORT);
5818		log = 1;
5819		goto done;
5820	}
5821
5822	pd.src = (struct pf_addr *)&h->ip_src;
5823	pd.dst = (struct pf_addr *)&h->ip_dst;
5824	pd.sport = pd.dport = NULL;
5825	pd.ip_sum = &h->ip_sum;
5826	pd.proto_sum = NULL;
5827	pd.proto = h->ip_p;
5828	pd.dir = dir;
5829	pd.sidx = (dir == PF_IN) ? 0 : 1;
5830	pd.didx = (dir == PF_IN) ? 1 : 0;
5831	pd.af = AF_INET;
5832	pd.tos = h->ip_tos;
5833	pd.tot_len = ntohs(h->ip_len);
5834
5835	/* handle fragments that didn't get reassembled by normalization */
5836	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5837		action = pf_test_fragment(&r, dir, kif, m, h,
5838		    &pd, &a, &ruleset);
5839		goto done;
5840	}
5841
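	/*
	 * Dispatch on the transport protocol: pull up its header and
	 * run the state and rule tests.
	 */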
5842	switch (h->ip_p) {
5843
5844	case IPPROTO_TCP: {
5845		struct tcphdr	th;
5846
5847		pd.hdr.tcp = &th;
5848		if (!pf_pull_hdr(m, off, &th, sizeof(th),
5849		    &action, &reason, AF_INET)) {
5850			log = action != PF_PASS;
5851			goto done;
5852		}
5853		pd.p_len = pd.tot_len - off - (th.th_off << 2);
5854		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5855			pqid = 1;
5856		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5857		if (action == PF_DROP)
5858			goto done;
5859		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5860		    &reason);
5861		if (action == PF_PASS) {
5862			if (pfsync_update_state_ptr != NULL)
5863				pfsync_update_state_ptr(s);
5864			r = s->rule.ptr;
5865			a = s->anchor.ptr;
5866			log = s->log;
5867		} else if (s == NULL)
5868			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5869			    &a, &ruleset, inp);
5870		break;
5871	}
5872
5873	case IPPROTO_UDP: {
5874		struct udphdr	uh;
5875
5876		pd.hdr.udp = &uh;
5877		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5878		    &action, &reason, AF_INET)) {
5879			log = action != PF_PASS;
5880			goto done;
5881		}
5882		if (uh.uh_dport == 0 ||
5883		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5884		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5885			action = PF_DROP;
5886			REASON_SET(&reason, PFRES_SHORT);
5887			goto done;
5888		}
5889		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5890		if (action == PF_PASS) {
5891			if (pfsync_update_state_ptr != NULL)
5892				pfsync_update_state_ptr(s);
5893			r = s->rule.ptr;
5894			a = s->anchor.ptr;
5895			log = s->log;
5896		} else if (s == NULL)
5897			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5898			    &a, &ruleset, inp);
5899		break;
5900	}
5901
5902	case IPPROTO_ICMP: {
5903		struct icmp	ih;
5904
5905		pd.hdr.icmp = &ih;
5906		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5907		    &action, &reason, AF_INET)) {
5908			log = action != PF_PASS;
5909			goto done;
5910		}
5911		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5912		    &reason);
5913		if (action == PF_PASS) {
5914			if (pfsync_update_state_ptr != NULL)
5915				pfsync_update_state_ptr(s);
5916			r = s->rule.ptr;
5917			a = s->anchor.ptr;
5918			log = s->log;
5919		} else if (s == NULL)
5920			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5921			    &a, &ruleset, inp);
5922		break;
5923	}
5924
5925#ifdef INET6
5926	case IPPROTO_ICMPV6: {
5927		action = PF_DROP;
5928		DPFPRINTF(PF_DEBUG_MISC,
5929		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5930		goto done;
5931	}
5932#endif
5933
5934	default:
5935		action = pf_test_state_other(&s, dir, kif, m, &pd);
5936		if (action == PF_PASS) {
5937			if (pfsync_update_state_ptr != NULL)
5938				pfsync_update_state_ptr(s);
5939			r = s->rule.ptr;
5940			a = s->anchor.ptr;
5941			log = s->log;
5942		} else if (s == NULL)
5943			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5944			    &a, &ruleset, inp);
5945		break;
5946	}
5947
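	/*
	 * Post-verdict processing: IP option policy, packet tagging, FIB
	 * selection, queue assignment, divert(4) handoff, logging and
	 * statistics.
	 */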
5948done:
5949	PF_RULES_RUNLOCK();
5950	if (action == PF_PASS && h->ip_hl > 5 &&
5951	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5952		action = PF_DROP;
5953		REASON_SET(&reason, PFRES_IPOPTIONS);
5954		log = r->log;
5955		DPFPRINTF(PF_DEBUG_MISC,
5956		    ("pf: dropping packet with ip options\n"));
5957	}
5958
5959	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5960		action = PF_DROP;
5961		REASON_SET(&reason, PFRES_MEMORY);
5962	}
5963	if (r->rtableid >= 0)
5964		M_SETFIB(m, r->rtableid);
5965
5966#ifdef ALTQ
5967	if (action == PF_PASS && r->qid) {
5968		if (pd.pf_mtag == NULL &&
5969		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5970			action = PF_DROP;
5971			REASON_SET(&reason, PFRES_MEMORY);
5972		} else {
5973			if (pqid || (pd.tos & IPTOS_LOWDELAY))
5974				pd.pf_mtag->qid = r->pqid;
5975			else
5976				pd.pf_mtag->qid = r->qid;
5977			/* Add hints for ECN. */
5978			pd.pf_mtag->hdr = h;
5979		}
5980
5981	}
5982#endif /* ALTQ */
5983
5984	/*
5985	 * connections redirected to loopback should not match sockets
5986	 * bound specifically to loopback due to security implications,
5987	 * see tcp_input() and in_pcblookup_listen().
5988	 */
5989	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5990	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5991	    (s->nat_rule.ptr->action == PF_RDR ||
5992	    s->nat_rule.ptr->action == PF_BINAT) &&
5993	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
5994		m->m_flags |= M_SKIP_FIREWALL;
5995
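	/*
	 * Hand the packet to divert(4) if a divert rule matched and the
	 * packet has not already looped through pf.
	 */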
5996	if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
5997	    !PACKET_LOOPED(&pd)) {
5998
5999		ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6000		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6001		if (ipfwtag != NULL) {
6002			((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6003			    ntohs(r->divert.port);
6004			((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6005
6006			if (s)
6007				PF_STATE_UNLOCK(s);
6008
6009			m_tag_prepend(m, ipfwtag);
6010			if (m->m_flags & M_FASTFWD_OURS) {
6011				if (pd.pf_mtag == NULL &&
6012				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6013					action = PF_DROP;
6014					REASON_SET(&reason, PFRES_MEMORY);
6015					log = 1;
6016					DPFPRINTF(PF_DEBUG_MISC,
6017					    ("pf: failed to allocate tag\n"));
6018				} else {
6019					pd.pf_mtag->flags |=
6020					    PF_FASTFWD_OURS_PRESENT;
6021					m->m_flags &= ~M_FASTFWD_OURS;
6022				}
6023			}
6024			ip_divert_ptr(*m0, dir ==  PF_IN ? DIR_IN : DIR_OUT);
6025			*m0 = NULL;
6026
6027			return (action);
6028		} else {
6029			/* XXX: ipfw has the same behaviour! */
6030			action = PF_DROP;
6031			REASON_SET(&reason, PFRES_MEMORY);
6032			log = 1;
6033			DPFPRINTF(PF_DEBUG_MISC,
6034			    ("pf: failed to allocate divert tag\n"));
6035		}
6036	}
6037
6038	if (log) {
6039		struct pf_rule *lr;
6040
6041		if (s != NULL && s->nat_rule.ptr != NULL &&
6042		    s->nat_rule.ptr->log & PF_LOG_ALL)
6043			lr = s->nat_rule.ptr;
6044		else
6045			lr = r;
6046		PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6047		    (s == NULL));
6048	}
6049
6050	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6051	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6052
6053	if (action == PF_PASS || r->action == PF_DROP) {
6054		dirndx = (dir == PF_OUT);
6055		r->packets[dirndx]++;
6056		r->bytes[dirndx] += pd.tot_len;
6057		if (a != NULL) {
6058			a->packets[dirndx]++;
6059			a->bytes[dirndx] += pd.tot_len;
6060		}
6061		if (s != NULL) {
6062			if (s->nat_rule.ptr != NULL) {
6063				s->nat_rule.ptr->packets[dirndx]++;
6064				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6065			}
6066			if (s->src_node != NULL) {
6067				s->src_node->packets[dirndx]++;
6068				s->src_node->bytes[dirndx] += pd.tot_len;
6069			}
6070			if (s->nat_src_node != NULL) {
6071				s->nat_src_node->packets[dirndx]++;
6072				s->nat_src_node->bytes[dirndx] += pd.tot_len;
6073			}
6074			dirndx = (dir == s->direction) ? 0 : 1;
6075			s->packets[dirndx]++;
6076			s->bytes[dirndx] += pd.tot_len;
6077		}
6078		tr = r;
6079		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6080		if (nr != NULL && r == &V_pf_default_rule)
6081			tr = nr;
6082		if (tr->src.addr.type == PF_ADDR_TABLE)
6083			pfr_update_stats(tr->src.addr.p.tbl,
6084			    (s == NULL) ? pd.src :
6085			    &s->key[(s->direction == PF_IN)]->
6086				addr[(s->direction == PF_OUT)],
6087			    pd.af, pd.tot_len, dir == PF_OUT,
6088			    r->action == PF_PASS, tr->src.neg);
6089		if (tr->dst.addr.type == PF_ADDR_TABLE)
6090			pfr_update_stats(tr->dst.addr.p.tbl,
6091			    (s == NULL) ? pd.dst :
6092			    &s->key[(s->direction == PF_IN)]->
6093				addr[(s->direction == PF_IN)],
6094			    pd.af, pd.tot_len, dir == PF_OUT,
6095			    r->action == PF_PASS, tr->dst.neg);
6096	}
6097
6098	switch (action) {
6099	case PF_SYNPROXY_DROP:
6100		m_freem(*m0);
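		/* FALLTHROUGH */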
6101	case PF_DEFER:
6102		*m0 = NULL;
6103		action = PF_PASS;
6104		break;
6105	case PF_DROP:
6106		m_freem(*m0);
6107		*m0 = NULL;
6108		break;
6109	default:
6110		/* pf_route() returns unlocked. */
6111		if (r->rt) {
6112			pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6113			return (action);
6114		}
6115		break;
6116	}
6117	if (s)
6118		PF_STATE_UNLOCK(s);
6119
6120	return (action);
6121}
6122#endif /* INET */
6123
6124#ifdef INET6
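/*
 * Main IPv6 test routine, the counterpart of pf_test().  In addition
 * to PF_IN and PF_OUT a PF_FWD direction is detected for forwarded
 * packets, so that packets pf reassembled can be re-fragmented before
 * they leave the box.
 */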
6125int
6126pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6127{
6128	struct pfi_kif		*kif;
6129	u_short			 action, reason = 0, log = 0;
6130	struct mbuf		*m = *m0, *n = NULL;
6131	struct m_tag		*mtag;
6132	struct ip6_hdr		*h = NULL;
6133	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6134	struct pf_state		*s = NULL;
6135	struct pf_ruleset	*ruleset = NULL;
6136	struct pf_pdesc		 pd;
6137	int			 off, terminal = 0, dirndx, rh_cnt = 0;
6138	int			 fwdir = dir;
6139
6140	M_ASSERTPKTHDR(m);
6141
6142	/* Detect packet forwarding.
6143	 * If the input interface is different from the output interface, we're
6144	 * forwarding.
6145	 * We do need to be careful about bridges.  If the
6146	 * net.link.bridge.pfil_bridge sysctl is set, we can be filtering on a
6147	 * bridge, so if the input interface is a bridge member and the output
6148	 * interface is its bridge, we're not actually forwarding but bridging.
6149	 */
6150	if (dir == PF_OUT && m->m_pkthdr.rcvif && ifp != m->m_pkthdr.rcvif
6151	    && (m->m_pkthdr.rcvif->if_bridge == NULL
6152	        || m->m_pkthdr.rcvif->if_bridge != ifp->if_softc))
6153		fwdir = PF_FWD;
6154
6155	if (!V_pf_status.running)
6156		return (PF_PASS);
6157
6158	memset(&pd, 0, sizeof(pd));
6159	pd.pf_mtag = pf_find_mtag(m);
6160
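	/* Packets pf generated itself are tagged and not filtered again. */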
6161	if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6162		return (PF_PASS);
6163
6164	kif = (struct pfi_kif *)ifp->if_pf_kif;
6165	if (kif == NULL) {
6166		DPFPRINTF(PF_DEBUG_URGENT,
6167		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6168		return (PF_DROP);
6169	}
6170	if (kif->pfik_flags & PFI_IFLAG_SKIP)
6171		return (PF_PASS);
6172
6173	if (m->m_flags & M_SKIP_FIREWALL)
6174		return (PF_PASS);
6175
6176	PF_RULES_RLOCK();
6177
6178	/* We do IP header normalization and packet reassembly here */
6179	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6180		action = PF_DROP;
6181		goto done;
6182	}
6183	m = *m0;	/* pf_normalize messes with m0 */
6184	h = mtod(m, struct ip6_hdr *);
6185
6186#if 1
6187	/*
6188	 * We do not support jumbograms yet.  A zero ip6_plen would lead to
6189	 * bogus length calculations further down, so drop the packet for now.
6190	 */
6191	if (htons(h->ip6_plen) == 0) {
6192		action = PF_DROP;
6193		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
6194		goto done;
6195	}
6196#endif
6197
6198	pd.src = (struct pf_addr *)&h->ip6_src;
6199	pd.dst = (struct pf_addr *)&h->ip6_dst;
6200	pd.sport = pd.dport = NULL;
6201	pd.ip_sum = NULL;
6202	pd.proto_sum = NULL;
6203	pd.dir = dir;
6204	pd.sidx = (dir == PF_IN) ? 0 : 1;
6205	pd.didx = (dir == PF_IN) ? 1 : 0;
6206	pd.af = AF_INET6;
6207	pd.tos = 0;
6208	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6209
6210	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6211	pd.proto = h->ip6_nxt;
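	/*
	 * Walk the chain of IPv6 extension headers until a terminal
	 * (transport) header is found, advancing 'off' past each one.
	 */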
6212	do {
6213		switch (pd.proto) {
6214		case IPPROTO_FRAGMENT:
6215			action = pf_test_fragment(&r, dir, kif, m, h,
6216			    &pd, &a, &ruleset);
6217			if (action == PF_DROP)
6218				REASON_SET(&reason, PFRES_FRAG);
6219			goto done;
6220		case IPPROTO_ROUTING: {
6221			struct ip6_rthdr rthdr;
6222
6223			if (rh_cnt++) {
6224				DPFPRINTF(PF_DEBUG_MISC,
6225				    ("pf: IPv6 more than one rthdr\n"));
6226				action = PF_DROP;
6227				REASON_SET(&reason, PFRES_IPOPTIONS);
6228				log = 1;
6229				goto done;
6230			}
6231			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6232			    &reason, pd.af)) {
6233				DPFPRINTF(PF_DEBUG_MISC,
6234				    ("pf: IPv6 short rthdr\n"));
6235				action = PF_DROP;
6236				REASON_SET(&reason, PFRES_SHORT);
6237				log = 1;
6238				goto done;
6239			}
6240			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6241				DPFPRINTF(PF_DEBUG_MISC,
6242				    ("pf: IPv6 rthdr0\n"));
6243				action = PF_DROP;
6244				REASON_SET(&reason, PFRES_IPOPTIONS);
6245				log = 1;
6246				goto done;
6247			}
6248			/* FALLTHROUGH */
6249		}
6250		case IPPROTO_AH:
6251		case IPPROTO_HOPOPTS:
6252		case IPPROTO_DSTOPTS: {
6253			/* get next header and header length */
6254			struct ip6_ext	opt6;
6255
6256			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6257			    NULL, &reason, pd.af)) {
6258				DPFPRINTF(PF_DEBUG_MISC,
6259				    ("pf: IPv6 short opt\n"));
6260				action = PF_DROP;
6261				log = 1;
6262				goto done;
6263			}
6264			if (pd.proto == IPPROTO_AH)
6265				off += (opt6.ip6e_len + 2) * 4;
6266			else
6267				off += (opt6.ip6e_len + 1) * 8;
6268			pd.proto = opt6.ip6e_nxt;
6269			/* go to the next header */
6270			break;
6271		}
6272		default:
6273			terminal++;
6274			break;
6275		}
6276	} while (!terminal);
6277
6278	/* if there's no routing header, use unmodified mbuf for checksumming */
6279	if (!n)
6280		n = m;
6281
6282	switch (pd.proto) {
6283
6284	case IPPROTO_TCP: {
6285		struct tcphdr	th;
6286
6287		pd.hdr.tcp = &th;
6288		if (!pf_pull_hdr(m, off, &th, sizeof(th),
6289		    &action, &reason, AF_INET6)) {
6290			log = action != PF_PASS;
6291			goto done;
6292		}
6293		pd.p_len = pd.tot_len - off - (th.th_off << 2);
6294		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6295		if (action == PF_DROP)
6296			goto done;
6297		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6298		    &reason);
6299		if (action == PF_PASS) {
6300			if (pfsync_update_state_ptr != NULL)
6301				pfsync_update_state_ptr(s);
6302			r = s->rule.ptr;
6303			a = s->anchor.ptr;
6304			log = s->log;
6305		} else if (s == NULL)
6306			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6307			    &a, &ruleset, inp);
6308		break;
6309	}
6310
6311	case IPPROTO_UDP: {
6312		struct udphdr	uh;
6313
6314		pd.hdr.udp = &uh;
6315		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6316		    &action, &reason, AF_INET6)) {
6317			log = action != PF_PASS;
6318			goto done;
6319		}
6320		if (uh.uh_dport == 0 ||
6321		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6322		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6323			action = PF_DROP;
6324			REASON_SET(&reason, PFRES_SHORT);
6325			goto done;
6326		}
6327		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6328		if (action == PF_PASS) {
6329			if (pfsync_update_state_ptr != NULL)
6330				pfsync_update_state_ptr(s);
6331			r = s->rule.ptr;
6332			a = s->anchor.ptr;
6333			log = s->log;
6334		} else if (s == NULL)
6335			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6336			    &a, &ruleset, inp);
6337		break;
6338	}
6339
6340	case IPPROTO_ICMP: {
6341		action = PF_DROP;
6342		DPFPRINTF(PF_DEBUG_MISC,
6343		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6344		goto done;
6345	}
6346
6347	case IPPROTO_ICMPV6: {
6348		struct icmp6_hdr	ih;
6349
6350		pd.hdr.icmp6 = &ih;
6351		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6352		    &action, &reason, AF_INET6)) {
6353			log = action != PF_PASS;
6354			goto done;
6355		}
6356		action = pf_test_state_icmp(&s, dir, kif,
6357		    m, off, h, &pd, &reason);
6358		if (action == PF_PASS) {
6359			if (pfsync_update_state_ptr != NULL)
6360				pfsync_update_state_ptr(s);
6361			r = s->rule.ptr;
6362			a = s->anchor.ptr;
6363			log = s->log;
6364		} else if (s == NULL)
6365			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6366			    &a, &ruleset, inp);
6367		break;
6368	}
6369
6370	default:
6371		action = pf_test_state_other(&s, dir, kif, m, &pd);
6372		if (action == PF_PASS) {
6373			if (pfsync_update_state_ptr != NULL)
6374				pfsync_update_state_ptr(s);
6375			r = s->rule.ptr;
6376			a = s->anchor.ptr;
6377			log = s->log;
6378		} else if (s == NULL)
6379			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6380			    &a, &ruleset, inp);
6381		break;
6382	}
6383
6384done:
6385	PF_RULES_RUNLOCK();
6386	if (n != m) {
6387		m_freem(n);
6388		n = NULL;
6389	}
6390
6391	/* handle dangerous IPv6 extension headers. */
6392	if (action == PF_PASS && rh_cnt &&
6393	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6394		action = PF_DROP;
6395		REASON_SET(&reason, PFRES_IPOPTIONS);
6396		log = r->log;
6397		DPFPRINTF(PF_DEBUG_MISC,
6398		    ("pf: dropping packet with dangerous v6 headers\n"));
6399	}
6400
6401	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6402		action = PF_DROP;
6403		REASON_SET(&reason, PFRES_MEMORY);
6404	}
6405	if (r->rtableid >= 0)
6406		M_SETFIB(m, r->rtableid);
6407
6408#ifdef ALTQ
6409	if (action == PF_PASS && r->qid) {
6410		if (pd.pf_mtag == NULL &&
6411		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6412			action = PF_DROP;
6413			REASON_SET(&reason, PFRES_MEMORY);
6414		} else {
6415			if (pd.tos & IPTOS_LOWDELAY)
6416				pd.pf_mtag->qid = r->pqid;
6417			else
6418				pd.pf_mtag->qid = r->qid;
6419			/* Add hints for ECN. */
6420			pd.pf_mtag->hdr = h;
6421		}
6422	}
6423#endif /* ALTQ */
6424
6425	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6426	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6427	    (s->nat_rule.ptr->action == PF_RDR ||
6428	    s->nat_rule.ptr->action == PF_BINAT) &&
6429	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6430		m->m_flags |= M_SKIP_FIREWALL;
6431
6432	/* XXX: Anybody working on it?! */
6433	if (r->divert.port)
6434		printf("pf: divert(9) is not supported for IPv6\n");
6435
6436	if (log) {
6437		struct pf_rule *lr;
6438
6439		if (s != NULL && s->nat_rule.ptr != NULL &&
6440		    s->nat_rule.ptr->log & PF_LOG_ALL)
6441			lr = s->nat_rule.ptr;
6442		else
6443			lr = r;
6444		PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6445		    &pd, (s == NULL));
6446	}
6447
6448	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6449	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6450
6451	if (action == PF_PASS || r->action == PF_DROP) {
6452		dirndx = (dir == PF_OUT);
6453		r->packets[dirndx]++;
6454		r->bytes[dirndx] += pd.tot_len;
6455		if (a != NULL) {
6456			a->packets[dirndx]++;
6457			a->bytes[dirndx] += pd.tot_len;
6458		}
6459		if (s != NULL) {
6460			if (s->nat_rule.ptr != NULL) {
6461				s->nat_rule.ptr->packets[dirndx]++;
6462				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6463			}
6464			if (s->src_node != NULL) {
6465				s->src_node->packets[dirndx]++;
6466				s->src_node->bytes[dirndx] += pd.tot_len;
6467			}
6468			if (s->nat_src_node != NULL) {
6469				s->nat_src_node->packets[dirndx]++;
6470				s->nat_src_node->bytes[dirndx] += pd.tot_len;
6471			}
6472			dirndx = (dir == s->direction) ? 0 : 1;
6473			s->packets[dirndx]++;
6474			s->bytes[dirndx] += pd.tot_len;
6475		}
6476		tr = r;
6477		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6478		if (nr != NULL && r == &V_pf_default_rule)
6479			tr = nr;
6480		if (tr->src.addr.type == PF_ADDR_TABLE)
6481			pfr_update_stats(tr->src.addr.p.tbl,
6482			    (s == NULL) ? pd.src :
6483			    &s->key[(s->direction == PF_IN)]->addr[0],
6484			    pd.af, pd.tot_len, dir == PF_OUT,
6485			    r->action == PF_PASS, tr->src.neg);
6486		if (tr->dst.addr.type == PF_ADDR_TABLE)
6487			pfr_update_stats(tr->dst.addr.p.tbl,
6488			    (s == NULL) ? pd.dst :
6489			    &s->key[(s->direction == PF_IN)]->addr[1],
6490			    pd.af, pd.tot_len, dir == PF_OUT,
6491			    r->action == PF_PASS, tr->dst.neg);
6492	}
6493
6494	switch (action) {
6495	case PF_SYNPROXY_DROP:
6496		m_freem(*m0);
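		/* FALLTHROUGH */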
6497	case PF_DEFER:
6498		*m0 = NULL;
6499		action = PF_PASS;
6500		break;
6501	case PF_DROP:
6502		m_freem(*m0);
6503		*m0 = NULL;
6504		break;
6505	default:
6506		/* pf_route6() returns unlocked. */
6507		if (r->rt) {
6508			pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6509			return (action);
6510		}
6511		break;
6512	}
6513
6514	if (s)
6515		PF_STATE_UNLOCK(s);
6516
6517	/* If reassembled packet passed, create new fragments. */
6518	if (action == PF_PASS && *m0 && fwdir == PF_FWD &&
6519	    (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6520		action = pf_refragment6(ifp, m0, mtag);
6521
6522	return (action);
6523}
6524#endif /* INET6 */
6525