160786Sps/*-
2240121Sdelphij * Copyright 2001 Niels Provos <provos@citi.umich.edu>
360786Sps * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
460786Sps * All rights reserved.
560786Sps *
660786Sps * Redistribution and use in source and binary forms, with or without
7240121Sdelphij * modification, are permitted provided that the following conditions
860786Sps * are met:
960786Sps * 1. Redistributions of source code must retain the above copyright
1060786Sps *    notice, this list of conditions and the following disclaimer.
1160786Sps * 2. Redistributions in binary form must reproduce the above copyright
1260786Sps *    notice, this list of conditions and the following disclaimer in the
1360786Sps *    documentation and/or other materials provided with the distribution.
1460786Sps *
1560786Sps * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1660786Sps * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1760786Sps * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1860786Sps * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1960786Sps * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2060786Sps * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2160786Sps * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2260786Sps * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2360786Sps * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2460786Sps * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2560786Sps *
2660786Sps *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
2760786Sps */
2860786Sps
2960786Sps#include <sys/cdefs.h>
3060786Sps__FBSDID("$FreeBSD: stable/10/sys/netpfil/pf/pf_norm.c 289703 2015-10-21 15:32:21Z kp $");
3160786Sps
3260786Sps#include "opt_inet.h"
3360786Sps#include "opt_inet6.h"
3460786Sps#include "opt_pf.h"
3560786Sps
3660786Sps#include <sys/param.h>
3760786Sps#include <sys/lock.h>
3860786Sps#include <sys/mbuf.h>
3960786Sps#include <sys/mutex.h>
4060786Sps#include <sys/refcount.h>
4160786Sps#include <sys/rwlock.h>
4260786Sps#include <sys/socket.h>
4360786Sps
4460786Sps#include <net/if.h>
4560786Sps#include <net/vnet.h>
4660786Sps#include <net/pfvar.h>
4760786Sps#include <net/if_pflog.h>
4860786Sps
4960786Sps#include <netinet/in.h>
5060786Sps#include <netinet/ip.h>
5160786Sps#include <netinet/ip_var.h>
5260786Sps#include <netinet6/ip6_var.h>
5360786Sps#include <netinet/tcp.h>
5460786Sps#include <netinet/tcp_fsm.h>
5560786Sps#include <netinet/tcp_seq.h>
5660786Sps
5760786Sps#ifdef INET6
5860786Sps#include <netinet/ip6.h>
5960786Sps#endif /* INET6 */
6060786Sps
6160786Spsstruct pf_frent {
6260786Sps	TAILQ_ENTRY(pf_frent)	fr_next;
6360786Sps	struct mbuf	*fe_m;
6460786Sps	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
6560786Sps					   ipv6, extension, fragment header */
6660786Sps	uint16_t	fe_extoff;	/* last extension header offset or 0 */
6760786Sps	uint16_t	fe_len;		/* fragment length */
6860786Sps	uint16_t	fe_off;		/* fragment offset */
6960786Sps	uint16_t	fe_mff;		/* more fragment flag */
7060786Sps};
7160786Sps
7260786Spsstruct pf_fragment_cmp {
7360786Sps	struct pf_addr	frc_src;
7460786Sps	struct pf_addr	frc_dst;
7560786Sps	uint32_t	frc_id;
7660786Sps	sa_family_t	frc_af;
7760786Sps	uint8_t		frc_proto;
7860786Sps};
7960786Sps
8060786Spsstruct pf_fragment {
8160786Sps	struct pf_fragment_cmp	fr_key;
8260786Sps#define fr_src	fr_key.frc_src
8360786Sps#define fr_dst	fr_key.frc_dst
8460786Sps#define fr_id	fr_key.frc_id
8560786Sps#define fr_af	fr_key.frc_af
8660786Sps#define fr_proto	fr_key.frc_proto
8760786Sps
8860786Sps	RB_ENTRY(pf_fragment) fr_entry;
8960786Sps	TAILQ_ENTRY(pf_fragment) frag_next;
9060786Sps	uint8_t		fr_flags;	/* status flags */
9160786Sps#define PFFRAG_SEENLAST		0x0001	/* Seen the last fragment for this packet */
9260786Sps#define PFFRAG_NOBUFFER		0x0002	/* Non-buffering fragment cache */
9360786Sps#define PFFRAG_DROP		0x0004	/* Drop all fragments */
9460786Sps#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
9560786Sps	uint16_t	fr_max;		/* fragment data max */
9660786Sps	uint32_t	fr_timeout;
9760786Sps	uint16_t	fr_maxlen;	/* maximum length of single fragment */
9860786Sps	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
9960786Sps};
10060786Sps
10160786Spsstruct pf_fragment_tag {
10260786Sps	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
10360786Sps	uint16_t	ft_extoff;	/* last extension header offset or 0 */
10460786Sps	uint16_t	ft_maxlen;	/* maximum fragment payload length */
10560786Sps	uint32_t	ft_id;		/* fragment id */
10660786Sps};
10760786Sps
10860786Spsstatic struct mtx pf_frag_mtx;
10960786Sps#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
11060786Sps#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
11160786Sps#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)
112161475Sdelphij
113161475SdelphijVNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */
11460786Sps
11560786Spsstatic VNET_DEFINE(uma_zone_t, pf_frent_z);
11660786Sps#define	V_pf_frent_z	VNET(pf_frent_z)
11760786Spsstatic VNET_DEFINE(uma_zone_t, pf_frag_z);
118161475Sdelphij#define	V_pf_frag_z	VNET(pf_frag_z)
11960786Sps
12060786SpsTAILQ_HEAD(pf_fragqueue, pf_fragment);
12160786SpsTAILQ_HEAD(pf_cachequeue, pf_fragment);
12260786Spsstatic VNET_DEFINE(struct pf_fragqueue,	pf_fragqueue);
12360786Sps#define	V_pf_fragqueue			VNET(pf_fragqueue)
12460786Spsstatic VNET_DEFINE(struct pf_cachequeue,	pf_cachequeue);
12560786Sps#define	V_pf_cachequeue			VNET(pf_cachequeue)
12660786SpsRB_HEAD(pf_frag_tree, pf_fragment);
12760786Spsstatic VNET_DEFINE(struct pf_frag_tree,	pf_frag_tree);
12860786Sps#define	V_pf_frag_tree			VNET(pf_frag_tree)
12960786Spsstatic VNET_DEFINE(struct pf_frag_tree,	pf_cache_tree);
13060786Sps#define	V_pf_cache_tree			VNET(pf_cache_tree)
13160786Spsstatic int		 pf_frag_compare(struct pf_fragment *,
13260786Sps			    struct pf_fragment *);
13360786Spsstatic RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
13460786Spsstatic RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
13560786Sps
13660786Spsstatic void	pf_flush_fragments(void);
13760786Spsstatic void	pf_free_fragment(struct pf_fragment *);
13860786Spsstatic void	pf_remove_fragment(struct pf_fragment *);
13960786Spsstatic int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
14060786Sps		    struct tcphdr *, int, sa_family_t);
14160786Spsstatic struct pf_frent *pf_create_fragment(u_short *);
14260786Spsstatic struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
14360786Sps		    struct pf_frag_tree *tree);
14460786Spsstatic struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
14560786Sps		    struct pf_frent *, u_short *);
14660786Spsstatic int	pf_isfull_fragment(struct pf_fragment *);
14760786Spsstatic struct mbuf *pf_join_fragment(struct pf_fragment *);
14860786Sps#ifdef INET
14960786Spsstatic void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
15060786Spsstatic int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
15160786Spsstatic struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
15260786Sps		    struct pf_fragment **, int, int, int *);
15360786Sps#endif	/* INET */
15460786Sps#ifdef INET6
15560786Spsstatic int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
156128345Stjr		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
157128345Stjrstatic void	pf_scrub_ip6(struct mbuf **, uint8_t);
158128345Stjr#endif	/* INET6 */
159128345Stjr
160128345Stjr#define	DPFPRINTF(x) do {				\
161128345Stjr	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
16260786Sps		printf("%s: ", __func__);		\
16360786Sps		printf x ;				\
16460786Sps	}						\
16560786Sps} while(0)
16660786Sps
16760786Sps#ifdef INET
16860786Spsstatic void
16960786Spspf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
170251154Sdelphij{
17160786Sps
17260786Sps	key->frc_src.v4 = ip->ip_src;
17360786Sps	key->frc_dst.v4 = ip->ip_dst;
17460786Sps	key->frc_af = AF_INET;
17560786Sps	key->frc_proto = ip->ip_p;
17660786Sps	key->frc_id = ip->ip_id;
17760786Sps}
17860786Sps#endif	/* INET */
17989019Sps
180251154Sdelphijvoid
181251154Sdelphijpf_normalize_init(void)
182251154Sdelphij{
183251154Sdelphij
184251154Sdelphij	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
185251154Sdelphij	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
186251154Sdelphij	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
187251154Sdelphij	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
188251154Sdelphij	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
189251154Sdelphij	    sizeof(struct pf_state_scrub),  NULL, NULL, NULL, NULL,
190251154Sdelphij	    UMA_ALIGN_PTR, 0);
19160786Sps
19260786Sps	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
19360786Sps	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
19460786Sps	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
19560786Sps	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
19660786Sps
19760786Sps	mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
19860786Sps
19960786Sps	TAILQ_INIT(&V_pf_fragqueue);
20060786Sps	TAILQ_INIT(&V_pf_cachequeue);
20160786Sps}
20260786Sps
20360786Spsvoid
20460786Spspf_normalize_cleanup(void)
20560786Sps{
20660786Sps
20760786Sps	uma_zdestroy(V_pf_state_scrub_z);
20860786Sps	uma_zdestroy(V_pf_frent_z);
20960786Sps	uma_zdestroy(V_pf_frag_z);
21060786Sps
21160786Sps	mtx_destroy(&pf_frag_mtx);
21260786Sps}
21360786Sps
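/*
 * Ordering function for the fragment red-black trees: compare by
 * fragment id, then protocol, then address family, then addresses.
 */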
21460786Spsstatic int
21560786Spspf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
21660786Sps{
21760786Sps	int	diff;
21860786Sps
21960786Sps	if ((diff = a->fr_id - b->fr_id) != 0)
22060786Sps		return (diff);
22160786Sps	if ((diff = a->fr_proto - b->fr_proto) != 0)
22260786Sps		return (diff);
22360786Sps	if ((diff = a->fr_af - b->fr_af) != 0)
22460786Sps		return (diff);
22560786Sps	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
22660786Sps		return (diff);
22760786Sps	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
22860786Sps		return (diff);
22960786Sps	return (0);
23060786Sps}
23160786Sps
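/*
 * Walk both the buffering and the cache queues from their oldest
 * entries and free every reassembly that has outlived the fragment
 * timeout.
 */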
23260786Spsvoid
23360786Spspf_purge_expired_fragments(void)
23460786Sps{
23560786Sps	struct pf_fragment	*frag;
23660786Sps	u_int32_t		 expire = time_uptime -
23760786Sps				    V_pf_default_rule.timeout[PFTM_FRAG];
23860786Sps
23960786Sps	PF_FRAG_LOCK();
24060786Sps	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
24160786Sps		KASSERT((BUFFER_FRAGMENTS(frag)),
24260786Sps		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
24360786Sps		if (frag->fr_timeout > expire)
24460786Sps			break;
24560786Sps
24660786Sps		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
24760786Sps		pf_free_fragment(frag);
24860786Sps	}
24989019Sps
25060786Sps	while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
25160786Sps		KASSERT((!BUFFER_FRAGMENTS(frag)),
25260786Sps		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
25360786Sps		if (frag->fr_timeout > expire)
25460786Sps			break;
25560786Sps
25660786Sps		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
25760786Sps		pf_free_fragment(frag);
25860786Sps		KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
25960786Sps		    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
26060786Sps		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
26160786Sps		    __FUNCTION__));
26260786Sps	}
26360786Sps	PF_FRAG_UNLOCK();
26489019Sps}
26560786Sps
26660786Sps/*
26760786Sps * Try to flush old fragments to make space for new ones
26860786Sps */
26963128Spsstatic void
27063128Spspf_flush_fragments(void)
27163128Sps{
27263128Sps	struct pf_fragment	*frag, *cache;
27363128Sps	int			 goal;
27463128Sps
27563128Sps	PF_FRAG_ASSERT();
27660786Sps
27760786Sps	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
27860786Sps	DPFPRINTF(("trying to free %d frag entries\n", goal));
27960786Sps	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
28060786Sps		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
28160786Sps		if (frag)
28260786Sps			pf_free_fragment(frag);
28360786Sps		cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
28460786Sps		if (cache)
285161475Sdelphij			pf_free_fragment(cache);
286221715Sdelphij		if (frag == NULL && cache == NULL)
287161475Sdelphij			break;
28860786Sps	}
28960786Sps}
29060786Sps
291173682Sdelphij/* Frees the fragment and all associated fragment entries */
292173682Sdelphijstatic void
293173682Sdelphijpf_free_fragment(struct pf_fragment *frag)
29460786Sps{
29560786Sps	struct pf_frent		*frent;
29660786Sps
29760786Sps	PF_FRAG_ASSERT();
29860786Sps
29960786Sps	/* Free all fragments */
300221715Sdelphij	if (BUFFER_FRAGMENTS(frag)) {
301221715Sdelphij		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
302221715Sdelphij		    frent = TAILQ_FIRST(&frag->fr_queue)) {
30360786Sps			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
304221715Sdelphij
30560786Sps			m_freem(frent->fe_m);
30660786Sps			uma_zfree(V_pf_frent_z, frent);
30760786Sps		}
30860786Sps	} else {
30960786Sps		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
31060786Sps		    frent = TAILQ_FIRST(&frag->fr_queue)) {
31160786Sps			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
31260786Sps
31360786Sps			KASSERT((TAILQ_EMPTY(&frag->fr_queue) ||
31460786Sps			    TAILQ_FIRST(&frag->fr_queue)->fe_off >
31560786Sps			    frent->fe_len),
31660786Sps			    ("! (TAILQ_EMPTY() || TAILQ_FIRST()->fe_off >"
31760786Sps			    " frent->fe_len): %s", __func__));
31860786Sps
31960786Sps			uma_zfree(V_pf_frent_z, frent);
32060786Sps		}
32160786Sps	}
32260786Sps
32360786Sps	pf_remove_fragment(frag);
32460786Sps}
32560786Sps
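/*
 * Look up a reassembly by key in the given tree; on a hit, refresh its
 * timeout and move it to the head of its LRU queue.
 */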
32660786Spsstatic struct pf_fragment *
32760786Spspf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
32860786Sps{
32960786Sps	struct pf_fragment	*frag;
33060786Sps
33160786Sps	PF_FRAG_ASSERT();
33260786Sps
33360786Sps	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
33460786Sps	if (frag != NULL) {
33560786Sps		/* XXX Are we sure we want to update the timeout? */
33660786Sps		frag->fr_timeout = time_uptime;
33760786Sps		if (BUFFER_FRAGMENTS(frag)) {
33860786Sps			TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
33960786Sps			TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
34060786Sps		} else {
34160786Sps			TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
34260786Sps			TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
34360786Sps		}
34460786Sps	}
34560786Sps
34660786Sps	return (frag);
34760786Sps}
348221715Sdelphij
349221715Sdelphij/* Removes a fragment from the fragment queue and frees the fragment */
350221715Sdelphijstatic void
351pf_remove_fragment(struct pf_fragment *frag)
352{
353
354	PF_FRAG_ASSERT();
355
356	if (BUFFER_FRAGMENTS(frag)) {
357		RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
358		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
359		uma_zfree(V_pf_frag_z, frag);
360	} else {
361		RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
362		TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
363		uma_zfree(V_pf_frag_z, frag);
364	}
365}
366
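/*
 * Allocate a fragment entry; if the zone is exhausted, flush old
 * fragments once and retry before giving up with PFRES_MEMORY.
 */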
367static struct pf_frent *
368pf_create_fragment(u_short *reason)
369{
370	struct pf_frent *frent;
371
372	PF_FRAG_ASSERT();
373
374	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
375	if (frent == NULL) {
376		pf_flush_fragments();
377		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
378		if (frent == NULL) {
379			REASON_SET(reason, PFRES_MEMORY);
380			return (NULL);
381		}
382	}
383
384	return (frent);
385}
386
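/*
 * Validate a fragment and insert it, in offset order, into the
 * reassembly queue identified by key, creating the queue if needed.
 * Overlaps with already queued data are trimmed from the new fragment
 * or from the queued ones.  Returns NULL (and sets reason) if the
 * fragment is bad or memory is exhausted; the fragment entry is freed
 * in that case.
 */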
387static struct pf_fragment *
388pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
389		u_short *reason)
390{
391	struct pf_frent		*after, *next, *prev;
392	struct pf_fragment	*frag;
393	uint16_t		total;
394
395	PF_FRAG_ASSERT();
396
397	/* No empty fragments. */
398	if (frent->fe_len == 0) {
399		DPFPRINTF(("bad fragment: len 0"));
400		goto bad_fragment;
401	}
402
403	/* All fragments are 8 byte aligned. */
404	if (frent->fe_mff && (frent->fe_len & 0x7)) {
405		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
406		goto bad_fragment;
407	}
408
409	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
410	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
411		DPFPRINTF(("bad fragment: max packet %d",
412		    frent->fe_off + frent->fe_len));
413		goto bad_fragment;
414	}
415
416	DPFPRINTF((key->frc_af == AF_INET ?
417	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
418	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));
419
420	/* Fully buffer all of the fragments in this fragment queue. */
421	frag = pf_find_fragment(key, &V_pf_frag_tree);
422
423	/* Create a new reassembly queue for this packet. */
424	if (frag == NULL) {
425		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
426		if (frag == NULL) {
427			pf_flush_fragments();
428			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
429			if (frag == NULL) {
430				REASON_SET(reason, PFRES_MEMORY);
431				goto drop_fragment;
432			}
433		}
434
435		*(struct pf_fragment_cmp *)frag = *key;
436		frag->fr_flags = 0;
437		frag->fr_timeout = time_uptime;
438		frag->fr_maxlen = frent->fe_len;
439		TAILQ_INIT(&frag->fr_queue);
440
441		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
442		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
443
444		/* We do not have a previous fragment. */
445		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
446
447		return (frag);
448	}
449
450	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY(&frag->fr_queue)"));
451
452	/* Remember maximum fragment len for refragmentation. */
453	if (frent->fe_len > frag->fr_maxlen)
454		frag->fr_maxlen = frent->fe_len;
455
456	/* Maximum data we have seen already. */
457	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
458		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
459
460	/* Non-terminal fragments must have the more-fragments flag set. */
461	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
462		goto bad_fragment;
463
464	/* Check if we saw the last fragment already. */
465	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
466		if (frent->fe_off + frent->fe_len > total ||
467		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
468			goto bad_fragment;
469	} else {
470		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
471			goto bad_fragment;
472	}
473
474	/* Find a fragment after the current one. */
475	prev = NULL;
476	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
477		if (after->fe_off > frent->fe_off)
478			break;
479		prev = after;
480	}
481
482	KASSERT(prev != NULL || after != NULL,
483	    ("prev != NULL || after != NULL"));
484
485	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
486		uint16_t precut;
487
488		precut = prev->fe_off + prev->fe_len - frent->fe_off;
489		if (precut >= frent->fe_len)
490			goto bad_fragment;
491		DPFPRINTF(("overlap -%d", precut));
492		m_adj(frent->fe_m, precut);
493		frent->fe_off += precut;
494		frent->fe_len -= precut;
495	}
496
497	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
498	    after = next) {
499		uint16_t aftercut;
500
501		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
502		DPFPRINTF(("adjust overlap %d", aftercut));
503		if (aftercut < after->fe_len) {
504			m_adj(after->fe_m, aftercut);
505			after->fe_off += aftercut;
506			after->fe_len -= aftercut;
507			break;
508		}
509
510		/* This fragment is completely overlapped, lose it. */
511		next = TAILQ_NEXT(after, fr_next);
512		m_freem(after->fe_m);
513		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
514		uma_zfree(V_pf_frent_z, after);
515	}
516
517	if (prev == NULL)
518		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
519	else
520		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
521
522	return (frag);
523
524bad_fragment:
525	REASON_SET(reason, PFRES_FRAG);
526drop_fragment:
527	uma_zfree(V_pf_frent_z, frent);
528	return (NULL);
529}
530
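/*
 * Return 1 when the queue holds a contiguous byte range from offset 0
 * up to a final fragment without the more-fragments flag, 0 otherwise.
 */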
531static int
532pf_isfull_fragment(struct pf_fragment *frag)
533{
534	struct pf_frent	*frent, *next;
535	uint16_t off, total;
536
537	/* Check if we are completely reassembled */
538	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
539		return (0);
540
541	/* Maximum data we have seen already */
542	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
543		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
544
545	/* Check if we have all the data */
546	off = 0;
547	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
548		next = TAILQ_NEXT(frent, fr_next);
549
550		off += frent->fe_len;
551		if (off < total && (next == NULL || next->fe_off != off)) {
552			DPFPRINTF(("missing fragment at %d, next %d, total %d",
553			    off, next == NULL ? -1 : next->fe_off, total));
554			return (0);
555		}
556	}
557	DPFPRINTF(("%d < %d?", off, total));
558	if (off < total)
559		return (0);
560	KASSERT(off == total, ("off == total"));
561
562	return (1);
563}
564
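/*
 * Concatenate the mbufs of all queued fragments into a single chain,
 * stripping the per-fragment headers and trailing bytes, and release
 * the reassembly state.
 */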
565static struct mbuf *
566pf_join_fragment(struct pf_fragment *frag)
567{
568	struct mbuf *m, *m2;
569	struct pf_frent	*frent, *next;
570
571	frent = TAILQ_FIRST(&frag->fr_queue);
572	next = TAILQ_NEXT(frent, fr_next);
573
574	m = frent->fe_m;
575	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
576	uma_zfree(V_pf_frent_z, frent);
577	for (frent = next; frent != NULL; frent = next) {
578		next = TAILQ_NEXT(frent, fr_next);
579
580		m2 = frent->fe_m;
581		/* Strip off ip header. */
582		m_adj(m2, frent->fe_hdrlen);
583		/* Strip off any trailing bytes. */
584		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
585
586		uma_zfree(V_pf_frent_z, frent);
587		m_cat(m, m2);
588	}
589
590	/* Remove from fragment queue. */
591	pf_remove_fragment(frag);
592
593	return (m);
594}
595
596#ifdef INET
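/*
 * Feed an IPv4 fragment into the reassembly queue.  Returns PF_PASS
 * with *m0 pointing at the completed packet, PF_PASS with *m0 set to
 * NULL while more fragments are outstanding, or PF_DROP on error.
 */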
597static int
598pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
599{
600	struct mbuf		*m = *m0;
601	struct pf_frent		*frent;
602	struct pf_fragment	*frag;
603	struct pf_fragment_cmp	key;
604	uint16_t		total, hdrlen;
605
606	/* Get an entry for the fragment queue */
607	if ((frent = pf_create_fragment(reason)) == NULL)
608		return (PF_DROP);
609
610	frent->fe_m = m;
611	frent->fe_hdrlen = ip->ip_hl << 2;
612	frent->fe_extoff = 0;
613	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
614	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
615	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
616
617	pf_ip2key(ip, dir, &key);
618
619	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
620		return (PF_DROP);
621
622	/* The mbuf is part of the fragment entry, no direct free or access */
623	m = *m0 = NULL;
624
625	if (!pf_isfull_fragment(frag))
626		return (PF_PASS);  /* drop because *m0 is NULL, no error */
627
628	/* We have all the data */
629	frent = TAILQ_FIRST(&frag->fr_queue);
630	KASSERT(frent != NULL, ("frent != NULL"));
631	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
632		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
633	hdrlen = frent->fe_hdrlen;
634
635	m = *m0 = pf_join_fragment(frag);
636	frag = NULL;
637
638	if (m->m_flags & M_PKTHDR) {
639		int plen = 0;
640		for (m = *m0; m; m = m->m_next)
641			plen += m->m_len;
642		m = *m0;
643		m->m_pkthdr.len = plen;
644	}
645
646	ip = mtod(m, struct ip *);
647	ip->ip_len = htons(hdrlen + total);
648	ip->ip_off &= ~(IP_MF|IP_OFFMASK);
649
650	if (hdrlen + total > IP_MAXPACKET) {
651		DPFPRINTF(("drop: too big: %d", total));
652		ip->ip_len = 0;
653		REASON_SET(reason, PFRES_SHORT);
654		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
655		return (PF_DROP);
656	}
657
658	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
659	return (PF_PASS);
660}
661#endif	/* INET */
662
663#ifdef INET6
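/*
 * Feed an IPv6 fragment into the reassembly queue.  On completion the
 * fragment header is removed, the protocol chain is restored and a
 * PF_REASSEMBLED tag carrying the refragmentation parameters is
 * attached to the packet; until then *m0 is set to NULL and PF_PASS
 * is returned.
 */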
664static int
665pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
666    uint16_t hdrlen, uint16_t extoff, u_short *reason)
667{
668	struct mbuf		*m = *m0;
669	struct pf_frent		*frent;
670	struct pf_fragment	*frag;
671	struct pf_fragment_cmp	 key;
672	struct m_tag		*mtag;
673	struct pf_fragment_tag	*ftag;
674	int			 off;
675	uint32_t		 frag_id;
676	uint16_t		 total, maxlen;
677	uint8_t			 proto;
678
679	PF_FRAG_LOCK();
680
681	/* Get an entry for the fragment queue. */
682	if ((frent = pf_create_fragment(reason)) == NULL) {
683		PF_FRAG_UNLOCK();
684		return (PF_DROP);
685	}
686
687	frent->fe_m = m;
688	frent->fe_hdrlen = hdrlen;
689	frent->fe_extoff = extoff;
690	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
691	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
692	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
693
694	key.frc_src.v6 = ip6->ip6_src;
695	key.frc_dst.v6 = ip6->ip6_dst;
696	key.frc_af = AF_INET6;
697	/* Only the first fragment's protocol is relevant. */
698	key.frc_proto = 0;
699	key.frc_id = fraghdr->ip6f_ident;
700
701	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
702		PF_FRAG_UNLOCK();
703		return (PF_DROP);
704	}
705
706	/* The mbuf is part of the fragment entry, no direct free or access. */
707	m = *m0 = NULL;
708
709	if (!pf_isfull_fragment(frag)) {
710		PF_FRAG_UNLOCK();
711		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
712	}
713
714	/* We have all the data. */
715	extoff = frent->fe_extoff;
716	maxlen = frag->fr_maxlen;
717	frag_id = frag->fr_id;
718	frent = TAILQ_FIRST(&frag->fr_queue);
719	KASSERT(frent != NULL, ("frent != NULL"));
720	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
721		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
722	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
723
724	m = *m0 = pf_join_fragment(frag);
725	frag = NULL;
726
727	PF_FRAG_UNLOCK();
728
729	/* Take protocol from first fragment header. */
730	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
731	KASSERT(m, ("%s: short mbuf chain", __func__));
732	proto = *(mtod(m, caddr_t) + off);
733	m = *m0;
734
735	/* Delete frag6 header */
736	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
737		goto fail;
738
739	if (m->m_flags & M_PKTHDR) {
740		int plen = 0;
741		for (m = *m0; m; m = m->m_next)
742			plen += m->m_len;
743		m = *m0;
744		m->m_pkthdr.len = plen;
745	}
746
747	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
748	    M_NOWAIT)) == NULL)
749		goto fail;
750	ftag = (struct pf_fragment_tag *)(mtag + 1);
751	ftag->ft_hdrlen = hdrlen;
752	ftag->ft_extoff = extoff;
753	ftag->ft_maxlen = maxlen;
754	ftag->ft_id = frag_id;
755	m_tag_prepend(m, mtag);
756
757	ip6 = mtod(m, struct ip6_hdr *);
758	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
759	if (extoff) {
760		/* Write protocol into next field of last extension header. */
761		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
762		    &off);
763		KASSERT(m, ("%s: short mbuf chain", __func__));
764		*(mtod(m, char *) + off) = proto;
765		m = *m0;
766	} else
767		ip6->ip6_nxt = proto;
768
769	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
770		DPFPRINTF(("drop: too big: %d", total));
771		ip6->ip6_plen = 0;
772		REASON_SET(reason, PFRES_SHORT);
773		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
774		return (PF_DROP);
775	}
776
777	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
778	return (PF_PASS);
779
780fail:
781	REASON_SET(reason, PFRES_MEMORY);
782	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
783	return (PF_DROP);
784}
785#endif	/* INET6 */
786
787#ifdef INET
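/*
 * Non-buffering fragment cache: remember which byte ranges of the
 * packet have already been seen, trim or drop overlapping data in the
 * incoming fragment, and tear the cache entry down once the packet is
 * complete.  Returns the (possibly trimmed) mbuf, or NULL when the
 * fragment is dropped.
 */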
788static struct mbuf *
789pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
790    int drop, int *nomem)
791{
792	struct mbuf		*m = *m0;
793	struct pf_frent		*frp, *fra, *cur = NULL;
794	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
795	u_int16_t		 off = ntohs(h->ip_off) << 3;
796	u_int16_t		 max = ip_len + off;
797	int			 hosed = 0;
798
799	PF_FRAG_ASSERT();
800	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
801	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
802
803	/* Create a new range queue for this packet */
804	if (*frag == NULL) {
805		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
806		if (*frag == NULL) {
807			pf_flush_fragments();
808			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
809			if (*frag == NULL)
810				goto no_mem;
811		}
812
813		/* Get an entry for the queue */
814		cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
815		if (cur == NULL) {
816			uma_zfree(V_pf_frag_z, *frag);
817			*frag = NULL;
818			goto no_mem;
819		}
820
821		(*frag)->fr_flags = PFFRAG_NOBUFFER;
822		(*frag)->fr_max = 0;
823		(*frag)->fr_src.v4 = h->ip_src;
824		(*frag)->fr_dst.v4 = h->ip_dst;
825		(*frag)->fr_af = AF_INET;
826		(*frag)->fr_proto = h->ip_p;
827		(*frag)->fr_id = h->ip_id;
828		(*frag)->fr_timeout = time_uptime;
829
830		cur->fe_off = off;
831		cur->fe_len = max; /* TODO: fe_len = max - off ? */
832		TAILQ_INIT(&(*frag)->fr_queue);
833		TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
834
835		RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
836		TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);
837
838		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
839
840		goto pass;
841	}
842
843	/*
844	 * Find a fragment after the current one:
845	 *  - off contains the real shifted offset.
846	 */
847	frp = NULL;
848	TAILQ_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
849		if (fra->fe_off > off)
850			break;
851		frp = fra;
852	}
853
854	KASSERT((frp != NULL || fra != NULL),
855	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));
856
857	if (frp != NULL) {
858		int	precut;
859
860		precut = frp->fe_len - off;
861		if (precut >= ip_len) {
862			/* Fragment is entirely a duplicate */
863			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
864			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
865			goto drop_fragment;
866		}
867		if (precut == 0) {
868			/* They are adjacent.  Fixup cache entry */
869			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
870			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
871			frp->fe_len = max;
872		} else if (precut > 0) {
873			/* The first part of this payload overlaps with a
874			 * fragment that has already been passed.
875			 * Need to trim off the first part of the payload.
876			 * But to do so easily, we need to create another
877			 * mbuf to throw the original header into.
878			 */
879
880			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
881			    h->ip_id, precut, frp->fe_off, frp->fe_len, off,
882			    max));
883
884			off += precut;
885			max -= precut;
886			/* Update the previous frag to encompass this one */
887			frp->fe_len = max;
888
889			if (!drop) {
890				/* XXX Optimization opportunity
891				 * This is a very heavy way to trim the payload.
892				 * we could do it much faster by diddling mbuf
893				 * internals but that would be even less legible
894				 * than this mbuf magic.  For my next trick,
895				 * I'll pull a rabbit out of my laptop.
896				 */
897				*m0 = m_dup(m, M_NOWAIT);
898				if (*m0 == NULL)
899					goto no_mem;
900				/* From KAME Project : We have missed this! */
901				m_adj(*m0, (h->ip_hl << 2) -
902				    (*m0)->m_pkthdr.len);
903
904				KASSERT(((*m0)->m_next == NULL),
905				    ("(*m0)->m_next != NULL: %s",
906				    __FUNCTION__));
907				m_adj(m, precut + (h->ip_hl << 2));
908				m_cat(*m0, m);
909				m = *m0;
910				if (m->m_flags & M_PKTHDR) {
911					int plen = 0;
912					struct mbuf *t;
913					for (t = m; t; t = t->m_next)
914						plen += t->m_len;
915					m->m_pkthdr.len = plen;
916				}
917
918
919				h = mtod(m, struct ip *);
920
921				KASSERT(((int)m->m_len ==
922				    ntohs(h->ip_len) - precut),
923				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
924				    __FUNCTION__));
925				h->ip_off = htons(ntohs(h->ip_off) +
926				    (precut >> 3));
927				h->ip_len = htons(ntohs(h->ip_len) - precut);
928			} else {
929				hosed++;
930			}
931		} else {
932			/* There is a gap between fragments */
933
934			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
935			    h->ip_id, -precut, frp->fe_off, frp->fe_len, off,
936			    max));
937
938			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
939			if (cur == NULL)
940				goto no_mem;
941
942			cur->fe_off = off;
943			cur->fe_len = max;
944			TAILQ_INSERT_AFTER(&(*frag)->fr_queue, frp, cur, fr_next);
945		}
946	}
947
948	if (fra != NULL) {
949		int	aftercut;
950		int	merge = 0;
951
952		aftercut = max - fra->fe_off;
953		if (aftercut == 0) {
954			/* Adjacent fragments */
955			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
956			    h->ip_id, off, max, fra->fe_off, fra->fe_len));
957			fra->fe_off = off;
958			merge = 1;
959		} else if (aftercut > 0) {
960			/* Need to chop off the tail of this fragment */
961			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
962			    h->ip_id, aftercut, off, max, fra->fe_off,
963			    fra->fe_len));
964			fra->fe_off = off;
965			max -= aftercut;
966
967			merge = 1;
968
969			if (!drop) {
970				m_adj(m, -aftercut);
971				if (m->m_flags & M_PKTHDR) {
972					int plen = 0;
973					struct mbuf *t;
974					for (t = m; t; t = t->m_next)
975						plen += t->m_len;
976					m->m_pkthdr.len = plen;
977				}
978				h = mtod(m, struct ip *);
979				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
980				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
981				    __FUNCTION__));
982				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
983			} else {
984				hosed++;
985			}
986		} else if (frp == NULL) {
987			/* There is a gap between fragments */
988			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
989			    h->ip_id, -aftercut, off, max, fra->fe_off,
990			    fra->fe_len));
991
992			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
993			if (cur == NULL)
994				goto no_mem;
995
996			cur->fe_off = off;
997			cur->fe_len = max;
998			TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
999		}
1000
1001
1002		/* Need to glue together two separate fragment descriptors */
1003		if (merge) {
1004			if (cur && fra->fe_off <= cur->fe_len) {
1005				/* Need to merge in a previous 'cur' */
1006				DPFPRINTF(("fragcache[%d]: adjacent(merge "
1007				    "%d-%d) %d-%d (%d-%d)\n",
1008				    h->ip_id, cur->fe_off, cur->fe_len, off,
1009				    max, fra->fe_off, fra->fe_len));
1010				fra->fe_off = cur->fe_off;
1011				TAILQ_REMOVE(&(*frag)->fr_queue, cur, fr_next);
1012				uma_zfree(V_pf_frent_z, cur);
1013				cur = NULL;
1014
1015			} else if (frp && fra->fe_off <= frp->fe_len) {
1016				/* Need to merge in a modified 'frp' */
1017				KASSERT((cur == NULL), ("cur != NULL: %s",
1018				    __FUNCTION__));
1019				DPFPRINTF(("fragcache[%d]: adjacent(merge "
1020				    "%d-%d) %d-%d (%d-%d)\n",
1021				    h->ip_id, frp->fe_off, frp->fe_len, off,
1022				    max, fra->fe_off, fra->fe_len));
1023				fra->fe_off = frp->fe_off;
1024				TAILQ_REMOVE(&(*frag)->fr_queue, frp, fr_next);
1025				uma_zfree(V_pf_frent_z, frp);
1026				frp = NULL;
1027
1028			}
1029		}
1030	}
1031
1032	if (hosed) {
1033		/*
1034		 * We must keep tracking the overall fragment even when
1035		 * we're going to drop it anyway so that we know when to
1036		 * free the overall descriptor.  Thus we drop the frag late.
1037		 */
1038		goto drop_fragment;
1039	}
1040
1041
1042 pass:
1043	/* Update maximum data size */
1044	if ((*frag)->fr_max < max)
1045		(*frag)->fr_max = max;
1046
1047	/* This is the last segment */
1048	if (!mff)
1049		(*frag)->fr_flags |= PFFRAG_SEENLAST;
1050
1051	/* Check if we are completely reassembled */
1052	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
1053	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_off == 0 &&
1054	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_len == (*frag)->fr_max) {
1055		/* Remove from fragment queue */
1056		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
1057		    (*frag)->fr_max));
1058		pf_free_fragment(*frag);
1059		*frag = NULL;
1060	}
1061
1062	return (m);
1063
1064 no_mem:
1065	*nomem = 1;
1066
1067	/* Still need to pay attention to !IP_MF */
1068	if (!mff && *frag != NULL)
1069		(*frag)->fr_flags |= PFFRAG_SEENLAST;
1070
1071	m_freem(m);
1072	return (NULL);
1073
1074 drop_fragment:
1075
1076	/* Still need to pay attention to !IP_MF */
1077	if (!mff && *frag != NULL)
1078		(*frag)->fr_flags |= PFFRAG_SEENLAST;
1079
1080	if (drop) {
1081		/* This fragment has been deemed bad.  Don't reass */
1082		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
1083			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
1084			    h->ip_id));
1085		(*frag)->fr_flags |= PFFRAG_DROP;
1086	}
1087
1088	m_freem(m);
1089	return (NULL);
1090}
1091#endif	/* INET */
1092
1093#ifdef INET6
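/*
 * Split a previously reassembled IPv6 packet back into fragments,
 * using the parameters saved in the PF_REASSEMBLED tag, and hand the
 * pieces to ip6_forward().
 */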
1094int
1095pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
1096{
1097	struct mbuf		*m = *m0, *t;
1098	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
1099	struct pf_pdesc		 pd;
1100	uint32_t		 frag_id;
1101	uint16_t		 hdrlen, extoff, maxlen;
1102	uint8_t			 proto;
1103	int			 error, action;
1104
1105	hdrlen = ftag->ft_hdrlen;
1106	extoff = ftag->ft_extoff;
1107	maxlen = ftag->ft_maxlen;
1108	frag_id = ftag->ft_id;
1109	m_tag_delete(m, mtag);
1110	mtag = NULL;
1111	ftag = NULL;
1112
1113	if (extoff) {
1114		int off;
1115
1116		/* Use protocol from next field of last extension header */
1117		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
1118		    &off);
1119		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
1120		proto = *(mtod(m, caddr_t) + off);
1121		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
1122		m = *m0;
1123	} else {
1124		struct ip6_hdr *hdr;
1125
1126		hdr = mtod(m, struct ip6_hdr *);
1127		proto = hdr->ip6_nxt;
1128		hdr->ip6_nxt = IPPROTO_FRAGMENT;
1129	}
1130
1131	/*
1132	 * Maxlen may be less than 8 if there was only a single
1133	 * fragment.  As it was fragmented before, add a fragment
1134	 * header also for a single fragment.  If total or maxlen
1135	 * is less than 8, ip6_fragment() will return EMSGSIZE and
1136	 * we drop the packet.
1137	 */
1138	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
1139	m = (*m0)->m_nextpkt;
1140	(*m0)->m_nextpkt = NULL;
1141	if (error == 0) {
1142		/* The first mbuf contains the unfragmented packet. */
1143		m_freem(*m0);
1144		*m0 = NULL;
1145		action = PF_PASS;
1146	} else {
1147		/* Drop expects an mbuf to free. */
1148		DPFPRINTF(("refragment error %d", error));
1149		action = PF_DROP;
1150	}
1151	for (t = m; m; m = t) {
1152		t = m->m_nextpkt;
1153		m->m_nextpkt = NULL;
1154		m->m_flags |= M_SKIP_FIREWALL;
1155		memset(&pd, 0, sizeof(pd));
1156		pd.pf_mtag = pf_find_mtag(m);
1157		if (error == 0)
1158			ip6_forward(m, 0);
1159		else
1160			m_freem(m);
1161	}
1162
1163	return (action);
1164}
1165#endif /* INET6 */
1166
1167#ifdef INET
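/*
 * Scrub an IPv4 packet: match it against the scrub ruleset, sanity
 * check the header, and either fully reassemble fragments or run them
 * through the non-buffering fragment cache, depending on the rule.
 */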
1168int
1169pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
1170    struct pf_pdesc *pd)
1171{
1172	struct mbuf		*m = *m0;
1173	struct pf_rule		*r;
1174	struct pf_fragment	*frag = NULL;
1175	struct pf_fragment_cmp	key;
1176	struct ip		*h = mtod(m, struct ip *);
1177	int			 mff = (ntohs(h->ip_off) & IP_MF);
1178	int			 hlen = h->ip_hl << 2;
1179	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1180	u_int16_t		 max;
1181	int			 ip_len;
1182	int			 ip_off;
1183	int			 tag = -1;
1184	int			 verdict;
1185
1186	PF_RULES_RASSERT();
1187
1188	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1189	while (r != NULL) {
1190		r->evaluations++;
1191		if (pfi_kif_match(r->kif, kif) == r->ifnot)
1192			r = r->skip[PF_SKIP_IFP].ptr;
1193		else if (r->direction && r->direction != dir)
1194			r = r->skip[PF_SKIP_DIR].ptr;
1195		else if (r->af && r->af != AF_INET)
1196			r = r->skip[PF_SKIP_AF].ptr;
1197		else if (r->proto && r->proto != h->ip_p)
1198			r = r->skip[PF_SKIP_PROTO].ptr;
1199		else if (PF_MISMATCHAW(&r->src.addr,
1200		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
1201		    r->src.neg, kif, M_GETFIB(m)))
1202			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1203		else if (PF_MISMATCHAW(&r->dst.addr,
1204		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
1205		    r->dst.neg, NULL, M_GETFIB(m)))
1206			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1207		else if (r->match_tag && !pf_match_tag(m, r, &tag,
1208		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
1209			r = TAILQ_NEXT(r, entries);
1210		else
1211			break;
1212	}
1213
1214	if (r == NULL || r->action == PF_NOSCRUB)
1215		return (PF_PASS);
1216	else {
1217		r->packets[dir == PF_OUT]++;
1218		r->bytes[dir == PF_OUT] += pd->tot_len;
1219	}
1220
1221	/* Check for illegal packets */
1222	if (hlen < (int)sizeof(struct ip))
1223		goto drop;
1224
1225	if (hlen > ntohs(h->ip_len))
1226		goto drop;
1227
1228	/* Clear IP_DF if the rule uses the no-df option */
1229	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
1230		u_int16_t ip_off = h->ip_off;
1231
1232		h->ip_off &= htons(~IP_DF);
1233		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1234	}
1235
1236	/* We will need other tests here */
1237	if (!fragoff && !mff)
1238		goto no_fragment;
1239
1240	/* We're dealing with a fragment now. Don't allow fragments
1241	 * with IP_DF to enter the cache. If the flag was cleared by
1242	 * no-df above, fine. Otherwise drop it.
1243	 */
1244	if (h->ip_off & htons(IP_DF)) {
1245		DPFPRINTF(("IP_DF\n"));
1246		goto bad;
1247	}
1248
1249	ip_len = ntohs(h->ip_len) - hlen;
1250	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1251
1252	/* All fragments are 8 byte aligned */
1253	if (mff && (ip_len & 0x7)) {
1254		DPFPRINTF(("mff and %d\n", ip_len));
1255		goto bad;
1256	}
1257
1258	/* Respect maximum length */
1259	if (fragoff + ip_len > IP_MAXPACKET) {
1260		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1261		goto bad;
1262	}
1263	max = fragoff + ip_len;
1264
1265	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
1266
1267		/* Fully buffer all of the fragments */
1268		PF_FRAG_LOCK();
1269
1270		pf_ip2key(h, dir, &key);
1271		frag = pf_find_fragment(&key, &V_pf_frag_tree);
1272
1273		/* Check if we saw the last fragment already */
1274		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1275		    max > frag->fr_max)
1276			goto bad;
1277
1278		/* Might return a completely reassembled mbuf, or NULL */
1279		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1280		verdict = pf_reassemble(m0, h, dir, reason);
1281		PF_FRAG_UNLOCK();
1282
1283		if (verdict != PF_PASS)
1284			return (PF_DROP);
1285
1286		m = *m0;
1287		if (m == NULL)
1288			return (PF_DROP);
1289
1290		/* use mtag from concatenated mbuf chain */
1291		pd->pf_mtag = pf_find_mtag(m);
1292#ifdef DIAGNOSTIC
1293		if (pd->pf_mtag == NULL) {
1294			printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
1295			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
1296				m_freem(m);
1297				*m0 = NULL;
1298				goto no_mem;
1299			}
1300		}
1301#endif
1302		h = mtod(m, struct ip *);
1303	} else {
1304		/* non-buffering fragment cache (drops or masks overlaps) */
1305		int	nomem = 0;
1306
1307		if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
1308			/*
1309			 * Already passed the fragment cache in the
1310			 * input direction.  If we continued, it would
1311			 * appear to be a dup and would be dropped.
1312			 */
1313			goto fragment_pass;
1314		}
1315
1316		PF_FRAG_LOCK();
1317		pf_ip2key(h, dir, &key);
1318		frag = pf_find_fragment(&key, &V_pf_cache_tree);
1319
1320		/* Check if we saw the last fragment already */
1321		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1322		    max > frag->fr_max) {
1323			if (r->rule_flag & PFRULE_FRAGDROP)
1324				frag->fr_flags |= PFFRAG_DROP;
1325			goto bad;
1326		}
1327
1328		*m0 = m = pf_fragcache(m0, h, &frag, mff,
1329		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
1330		PF_FRAG_UNLOCK();
1331		if (m == NULL) {
1332			if (nomem)
1333				goto no_mem;
1334			goto drop;
1335		}
1336
1337		/* use mtag from copied and trimmed mbuf chain */
1338		pd->pf_mtag = pf_find_mtag(m);
1339#ifdef DIAGNOSTIC
1340		if (pd->pf_mtag == NULL) {
1341			printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
1342			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
1343				m_freem(m);
1344				*m0 = NULL;
1345				goto no_mem;
1346			}
1347		}
1348#endif
1349		if (dir == PF_IN)
1350			pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;
1351
1352		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1353			goto drop;
1354		goto fragment_pass;
1355	}
1356
1357 no_fragment:
1358	/* At this point, only IP_DF is allowed in ip_off */
1359	if (h->ip_off & ~htons(IP_DF)) {
1360		u_int16_t ip_off = h->ip_off;
1361
1362		h->ip_off &= htons(IP_DF);
1363		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1364	}
1365
1366	/* No return here: fall through into fragment_pass. */
1367
1368 fragment_pass:
1369	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);
1370
1371	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
1372		pd->flags |= PFDESC_IP_REAS;
1373	return (PF_PASS);
1374
1375 no_mem:
1376	REASON_SET(reason, PFRES_MEMORY);
1377	if (r != NULL && r->log)
1378		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1379		    1);
1380	return (PF_DROP);
1381
1382 drop:
1383	REASON_SET(reason, PFRES_NORM);
1384	if (r != NULL && r->log)
1385		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1386		    1);
1387	return (PF_DROP);
1388
1389 bad:
1390	DPFPRINTF(("dropping bad fragment\n"));
1391
1392	/* Free associated fragments */
1393	if (frag != NULL) {
1394		pf_free_fragment(frag);
1395		PF_FRAG_UNLOCK();
1396	}
1397
1398	REASON_SET(reason, PFRES_FRAG);
1399	if (r != NULL && r->log)
1400		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1401		    1);
1402
1403	return (PF_DROP);
1404}
1405#endif	/* INET */
1406
1407#ifdef INET6
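/*
 * Scrub an IPv6 packet: match it against the scrub ruleset, walk the
 * extension header chain checking its sanity, and reassemble the
 * packet when a fragment header is found.
 */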
1408int
1409pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
1410    u_short *reason, struct pf_pdesc *pd)
1411{
1412	struct mbuf		*m = *m0;
1413	struct pf_rule		*r;
1414	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
1415	int			 extoff;
1416	int			 off;
1417	struct ip6_ext		 ext;
1418	struct ip6_opt		 opt;
1419	struct ip6_opt_jumbo	 jumbo;
1420	struct ip6_frag		 frag;
1421	u_int32_t		 jumbolen = 0, plen;
1422	int			 optend;
1423	int			 ooff;
1424	u_int8_t		 proto;
1425	int			 terminal;
1426
1427	PF_RULES_RASSERT();
1428
1429	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1430	while (r != NULL) {
1431		r->evaluations++;
1432		if (pfi_kif_match(r->kif, kif) == r->ifnot)
1433			r = r->skip[PF_SKIP_IFP].ptr;
1434		else if (r->direction && r->direction != dir)
1435			r = r->skip[PF_SKIP_DIR].ptr;
1436		else if (r->af && r->af != AF_INET6)
1437			r = r->skip[PF_SKIP_AF].ptr;
1438#if 0 /* header chain! */
1439		else if (r->proto && r->proto != h->ip6_nxt)
1440			r = r->skip[PF_SKIP_PROTO].ptr;
1441#endif
1442		else if (PF_MISMATCHAW(&r->src.addr,
1443		    (struct pf_addr *)&h->ip6_src, AF_INET6,
1444		    r->src.neg, kif, M_GETFIB(m)))
1445			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1446		else if (PF_MISMATCHAW(&r->dst.addr,
1447		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
1448		    r->dst.neg, NULL, M_GETFIB(m)))
1449			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1450		else
1451			break;
1452	}
1453
1454	if (r == NULL || r->action == PF_NOSCRUB)
1455		return (PF_PASS);
1456	else {
1457		r->packets[dir == PF_OUT]++;
1458		r->bytes[dir == PF_OUT] += pd->tot_len;
1459	}
1460
1461	/* Check for illegal packets */
1462	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1463		goto drop;
1464
1465	extoff = 0;
1466	off = sizeof(struct ip6_hdr);
1467	proto = h->ip6_nxt;
1468	terminal = 0;
1469	do {
1470		switch (proto) {
1471		case IPPROTO_FRAGMENT:
1472			goto fragment;
1473			break;
1474		case IPPROTO_AH:
1475		case IPPROTO_ROUTING:
1476		case IPPROTO_DSTOPTS:
1477			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1478			    NULL, AF_INET6))
1479				goto shortpkt;
1480			extoff = off;
1481			if (proto == IPPROTO_AH)
1482				off += (ext.ip6e_len + 2) * 4;
1483			else
1484				off += (ext.ip6e_len + 1) * 8;
1485			proto = ext.ip6e_nxt;
1486			break;
1487		case IPPROTO_HOPOPTS:
1488			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1489			    NULL, AF_INET6))
1490				goto shortpkt;
1491			extoff = off;
1492			optend = off + (ext.ip6e_len + 1) * 8;
1493			ooff = off + sizeof(ext);
1494			do {
1495				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1496				    sizeof(opt.ip6o_type), NULL, NULL,
1497				    AF_INET6))
1498					goto shortpkt;
1499				if (opt.ip6o_type == IP6OPT_PAD1) {
1500					ooff++;
1501					continue;
1502				}
1503				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1504				    NULL, NULL, AF_INET6))
1505					goto shortpkt;
1506				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1507					goto drop;
1508				switch (opt.ip6o_type) {
1509				case IP6OPT_JUMBO:
1510					if (h->ip6_plen != 0)
1511						goto drop;
1512					if (!pf_pull_hdr(m, ooff, &jumbo,
1513					    sizeof(jumbo), NULL, NULL,
1514					    AF_INET6))
1515						goto shortpkt;
1516					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
1517					    sizeof(jumbolen));
1518					jumbolen = ntohl(jumbolen);
1519					if (jumbolen <= IPV6_MAXPACKET)
1520						goto drop;
1521					if (sizeof(struct ip6_hdr) + jumbolen !=
1522					    m->m_pkthdr.len)
1523						goto drop;
1524					break;
1525				default:
1526					break;
1527				}
1528				ooff += sizeof(opt) + opt.ip6o_len;
1529			} while (ooff < optend);
1530
1531			off = optend;
1532			proto = ext.ip6e_nxt;
1533			break;
1534		default:
1535			terminal = 1;
1536			break;
1537		}
1538	} while (!terminal);
1539
1540	/* jumbo payload option must be present, or plen > 0 */
1541	if (ntohs(h->ip6_plen) == 0)
1542		plen = jumbolen;
1543	else
1544		plen = ntohs(h->ip6_plen);
1545	if (plen == 0)
1546		goto drop;
1547	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1548		goto shortpkt;
1549
1550	pf_scrub_ip6(&m, r->min_ttl);
1551
1552	return (PF_PASS);
1553
1554 fragment:
1555	/* Jumbo payload packets cannot be fragmented. */
1556	plen = ntohs(h->ip6_plen);
1557	if (plen == 0 || jumbolen)
1558		goto drop;
1559	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1560		goto shortpkt;
1561
1562	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1563		goto shortpkt;
1564
1565	/* Offset now points to data portion. */
1566	off += sizeof(frag);
1567
1568	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
1569	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
1570		return (PF_DROP);
1571	m = *m0;
1572	if (m == NULL)
1573		return (PF_DROP);
1574
1575	pd->flags |= PFDESC_IP_REAS;
1576	return (PF_PASS);
1577
1578 shortpkt:
1579	REASON_SET(reason, PFRES_SHORT);
1580	if (r != NULL && r->log)
1581		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
1582		    1);
1583	return (PF_DROP);
1584
1585 drop:
1586	REASON_SET(reason, PFRES_NORM);
1587	if (r != NULL && r->log)
1588		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
1589		    1);
1590	return (PF_DROP);
1591}
1592#endif /* INET6 */
1593
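/*
 * Scrub a TCP segment: drop packets with illegal flag combinations or
 * header lengths, clear reserved bits and stray urgent pointers, and
 * optionally normalize the MSS option.
 */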
1594int
1595pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
1596    int off, void *h, struct pf_pdesc *pd)
1597{
1598	struct pf_rule	*r, *rm = NULL;
1599	struct tcphdr	*th = pd->hdr.tcp;
1600	int		 rewrite = 0;
1601	u_short		 reason;
1602	u_int8_t	 flags;
1603	sa_family_t	 af = pd->af;
1604
1605	PF_RULES_RASSERT();
1606
1607	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1608	while (r != NULL) {
1609		r->evaluations++;
1610		if (pfi_kif_match(r->kif, kif) == r->ifnot)
1611			r = r->skip[PF_SKIP_IFP].ptr;
1612		else if (r->direction && r->direction != dir)
1613			r = r->skip[PF_SKIP_DIR].ptr;
1614		else if (r->af && r->af != af)
1615			r = r->skip[PF_SKIP_AF].ptr;
1616		else if (r->proto && r->proto != pd->proto)
1617			r = r->skip[PF_SKIP_PROTO].ptr;
1618		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1619		    r->src.neg, kif, M_GETFIB(m)))
1620			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1621		else if (r->src.port_op && !pf_match_port(r->src.port_op,
1622			    r->src.port[0], r->src.port[1], th->th_sport))
1623			r = r->skip[PF_SKIP_SRC_PORT].ptr;
1624		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1625		    r->dst.neg, NULL, M_GETFIB(m)))
1626			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1627		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1628			    r->dst.port[0], r->dst.port[1], th->th_dport))
1629			r = r->skip[PF_SKIP_DST_PORT].ptr;
1630		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1631			    pf_osfp_fingerprint(pd, m, off, th),
1632			    r->os_fingerprint))
1633			r = TAILQ_NEXT(r, entries);
1634		else {
1635			rm = r;
1636			break;
1637		}
1638	}
1639
1640	if (rm == NULL || rm->action == PF_NOSCRUB)
1641		return (PF_PASS);
1642	else {
1643		r->packets[dir == PF_OUT]++;
1644		r->bytes[dir == PF_OUT] += pd->tot_len;
1645	}
1646
1647	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1648		pd->flags |= PFDESC_TCP_NORM;
1649
1650	flags = th->th_flags;
1651	if (flags & TH_SYN) {
1652		/* Illegal packet */
1653		if (flags & TH_RST)
1654			goto tcp_drop;
1655
1656		if (flags & TH_FIN)
1657			goto tcp_drop;
1658	} else {
1659		/* Illegal packet */
1660		if (!(flags & (TH_ACK|TH_RST)))
1661			goto tcp_drop;
1662	}
1663
1664	if (!(flags & TH_ACK)) {
1665		/* These flags are only valid if ACK is set */
1666		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1667			goto tcp_drop;
1668	}
1669
1670	/* Check for illegal header length */
1671	if (th->th_off < (sizeof(struct tcphdr) >> 2))
1672		goto tcp_drop;
1673
1674	/* If flags changed, or reserved data set, then adjust */
1675	if (flags != th->th_flags || th->th_x2 != 0) {
1676		u_int16_t	ov, nv;
1677
1678		ov = *(u_int16_t *)(&th->th_ack + 1);
1679		th->th_flags = flags;
1680		th->th_x2 = 0;
1681		nv = *(u_int16_t *)(&th->th_ack + 1);
1682
1683		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
1684		rewrite = 1;
1685	}
1686
1687	/* Remove urgent pointer, if TH_URG is not set */
1688	if (!(flags & TH_URG) && th->th_urp) {
1689		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
1690		    0, 0);
1691		th->th_urp = 0;
1692		rewrite = 1;
1693	}
1694
1695	/* Process options */
1696	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
1697		rewrite = 1;
1698
1699	/* copy back packet headers if we sanitized */
1700	if (rewrite)
1701		m_copyback(m, off, sizeof(*th), (caddr_t)th);
1702
1703	return (PF_PASS);
1704
1705 tcp_drop:
1706	REASON_SET(&reason, PFRES_NORM);
1707	if (rm != NULL && r->log)
1708		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
1709		    1);
1710	return (PF_DROP);
1711}
1712
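/*
 * Allocate and initialize the scrub state for one peer of a TCP
 * connection: record the initial TTL and, on the SYN, the timestamp
 * option so that later segments can be modulated and checked.
 */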
1713int
1714pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1715    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1716{
1717	u_int32_t tsval, tsecr;
1718	u_int8_t hdr[60];
1719	u_int8_t *opt;
1720
1721	KASSERT((src->scrub == NULL),
1722	    ("pf_normalize_tcp_init: src->scrub != NULL"));
1723
1724	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1725	if (src->scrub == NULL)
1726		return (1);
1727
1728	switch (pd->af) {
1729#ifdef INET
1730	case AF_INET: {
1731		struct ip *h = mtod(m, struct ip *);
1732		src->scrub->pfss_ttl = h->ip_ttl;
1733		break;
1734	}
1735#endif /* INET */
1736#ifdef INET6
1737	case AF_INET6: {
1738		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1739		src->scrub->pfss_ttl = h->ip6_hlim;
1740		break;
1741	}
1742#endif /* INET6 */
1743	}
1744
1745
1746	/*
1747	 * All normalizations below are only begun if we see the start of
1748	 * the connections.  They must all set an enabled bit in pfss_flags
1749	 */
1750	if ((th->th_flags & TH_SYN) == 0)
1751		return (0);
1752
1753
1754	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1755	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1756		/* Diddle with TCP options */
1757		int hlen;
1758		opt = hdr + sizeof(struct tcphdr);
1759		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1760		while (hlen >= TCPOLEN_TIMESTAMP) {
1761			switch (*opt) {
1762			case TCPOPT_EOL:	/* FALLTHROUGH */
1763			case TCPOPT_NOP:
1764				opt++;
1765				hlen--;
1766				break;
1767			case TCPOPT_TIMESTAMP:
1768				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1769					src->scrub->pfss_flags |=
1770					    PFSS_TIMESTAMP;
1771					src->scrub->pfss_ts_mod =
1772					    htonl(arc4random());
1773
1774					/* note PFSS_PAWS not set yet */
1775					memcpy(&tsval, &opt[2],
1776					    sizeof(u_int32_t));
1777					memcpy(&tsecr, &opt[6],
1778					    sizeof(u_int32_t));
1779					src->scrub->pfss_tsval0 = ntohl(tsval);
1780					src->scrub->pfss_tsval = ntohl(tsval);
1781					src->scrub->pfss_tsecr = ntohl(tsecr);
1782					getmicrouptime(&src->scrub->pfss_last);
1783				}
1784				/* FALLTHROUGH */
1785			default:
1786				hlen -= MAX(opt[1], 2);
1787				opt += MAX(opt[1], 2);
1788				break;
1789			}
1790		}
1791	}
1792
1793	return (0);
1794}
1795
1796void
1797pf_normalize_tcp_cleanup(struct pf_state *state)
1798{
1799	if (state->src.scrub)
1800		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1801	if (state->dst.scrub)
1802		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1803
1804	/* Someday... flush the TCP segment reassembly descriptors. */
1805}
1806
1807int
1808pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1809    u_short *reason, struct tcphdr *th, struct pf_state *state,
1810    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1811{
1812	struct timeval uptime;
1813	u_int32_t tsval, tsecr;
1814	u_int tsval_from_last;
1815	u_int8_t hdr[60];
1816	u_int8_t *opt;
1817	int copyback = 0;
1818	int got_ts = 0;
1819
1820	KASSERT((src->scrub || dst->scrub),
1821	    ("%s: neither src->scrub nor dst->scrub is set", __func__));
1822
1823	/*
1824	 * Enforce the minimum TTL seen for this connection.  Negate a common
1825	 * technique to evade an intrusion detection system and confuse
1826	 * firewall state code.
1827	 */
1828	switch (pd->af) {
1829#ifdef INET
1830	case AF_INET: {
1831		if (src->scrub) {
1832			struct ip *h = mtod(m, struct ip *);
1833			if (h->ip_ttl > src->scrub->pfss_ttl)
1834				src->scrub->pfss_ttl = h->ip_ttl;
1835			h->ip_ttl = src->scrub->pfss_ttl;
1836		}
1837		break;
1838	}
1839#endif /* INET */
1840#ifdef INET6
1841	case AF_INET6: {
1842		if (src->scrub) {
1843			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1844			if (h->ip6_hlim > src->scrub->pfss_ttl)
1845				src->scrub->pfss_ttl = h->ip6_hlim;
1846			h->ip6_hlim = src->scrub->pfss_ttl;
1847		}
1848		break;
1849	}
1850#endif /* INET6 */
1851	}
1852
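	/*
	 * Rewrite the timestamp option in flight: the sender's tsval is
	 * shifted forward by its own pfss_ts_mod, the echoed tsecr is
	 * shifted back by the other peer's pfss_ts_mod, and
	 * pf_change_proto_a() keeps the TCP checksum consistent.  The
	 * options are only pulled when the header advertises them and at
	 * least one peer negotiated timestamps.
	 */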
1853	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1854	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1855	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1856	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1857		/* Diddle with TCP options */
1858		int hlen;
1859		opt = hdr + sizeof(struct tcphdr);
1860		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1861		while (hlen >= TCPOLEN_TIMESTAMP) {
1862			switch (*opt) {
1863			case TCPOPT_EOL:	/* FALLTHROUGH */
1864			case TCPOPT_NOP:
1865				opt++;
1866				hlen--;
1867				break;
1868			case TCPOPT_TIMESTAMP:
1869				/* Modulate the timestamps.  Raw timestamps can be
1870				 * used for NAT detection, OS uptime determination or
1871				 * reboot detection.
1872				 */
1873
1874				if (got_ts) {
1875					/* Huh?  Multiple timestamps!? */
1876					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1877						DPFPRINTF(("multiple TS??"));
1878						pf_print_state(state);
1879						printf("\n");
1880					}
1881					REASON_SET(reason, PFRES_TS);
1882					return (PF_DROP);
1883				}
1884				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1885					memcpy(&tsval, &opt[2],
1886					    sizeof(u_int32_t));
1887					if (tsval && src->scrub &&
1888					    (src->scrub->pfss_flags &
1889					    PFSS_TIMESTAMP)) {
1890						tsval = ntohl(tsval);
1891						pf_change_proto_a(m, &opt[2],
1892						    &th->th_sum,
1893						    htonl(tsval +
1894						    src->scrub->pfss_ts_mod),
1895						    0);
1896						copyback = 1;
1897					}
1898
1899					/* Modulate TS reply iff valid (!0) */
1900					memcpy(&tsecr, &opt[6],
1901					    sizeof(u_int32_t));
1902					if (tsecr && dst->scrub &&
1903					    (dst->scrub->pfss_flags &
1904					    PFSS_TIMESTAMP)) {
1905						tsecr = ntohl(tsecr)
1906						    - dst->scrub->pfss_ts_mod;
1907						pf_change_proto_a(m, &opt[6],
1908						    &th->th_sum, htonl(tsecr),
1909						    0);
1910						copyback = 1;
1911					}
1912					got_ts = 1;
1913				}
1914				/* FALLTHROUGH */
1915			default:
1916				hlen -= MAX(opt[1], 2);
1917				opt += MAX(opt[1], 2);
1918				break;
1919			}
1920		}
1921		if (copyback) {
1922			/* Copy back the options; the caller copies back the header */
1923			*writeback = 1;
1924			m_copyback(m, off + sizeof(struct tcphdr),
1925			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1926			    sizeof(struct tcphdr));
1927		}
1928	}
1929
1930
1931	/*
1932	 * Must invalidate PAWS checks on connections idle for too long.
1933	 * The fastest allowed timestamp clock is 1ms.  That turns out to
1934	 * be about 24 days before it wraps.  XXX Right now our lowerbound
1935	 * TS echo check only works for the first 12 days of a connection
1936	 * when the TS has exhausted half its 32bit space
1937	 */
1938#define TS_MAX_IDLE	(24*24*60*60)
1939#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
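/*
 * Rough arithmetic behind the limits above: at the fastest allowed tick
 * of 1ms, 2^31 ticks is about 24.8 days, so 24 idle days is a safe
 * cut-off, and a 12 day connection limit keeps the echo within half the
 * 32-bit space of the original tsval.
 */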
1940
1941	getmicrouptime(&uptime);
1942	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1943	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1944	    time_uptime - state->creation > TS_MAX_CONN))  {
1945		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1946			DPFPRINTF(("src idled out of PAWS\n"));
1947			pf_print_state(state);
1948			printf("\n");
1949		}
1950		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1951		    | PFSS_PAWS_IDLED;
1952	}
1953	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1954	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1955		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1956			DPFPRINTF(("dst idled out of PAWS\n"));
1957			pf_print_state(state);
1958			printf("\n");
1959		}
1960		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1961		    | PFSS_PAWS_IDLED;
1962	}
1963
1964	if (got_ts && src->scrub && dst->scrub &&
1965	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1966	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1967		/* Validate that the timestamps are "in-window".
1968		 * RFC1323 describes TCP Timestamp options that allow
1969		 * measurement of RTT (round trip time) and PAWS
1970		 * (protection against wrapped sequence numbers).  PAWS
1971		 * gives us a set of rules for rejecting packets on
1972		 * long fat pipes (packets that were somehow delayed
1973		 * in transit longer than the time it took to send the
1974		 * full TCP sequence space of 4GB).  We can use these
1975		 * rules and infer a few others that will let us treat
1976		 * the 32bit timestamp and the 32bit echoed timestamp
1977		 * as sequence numbers to prevent a blind attacker from
1978		 * inserting packets into a connection.
1979		 *
1980		 * RFC1323 tells us:
1981		 *  - The timestamp on this packet must be greater than
1982		 *    or equal to the last value echoed by the other
1983		 *    endpoint.  The RFC says those will be discarded
1984		 *    since it is a dup that has already been acked.
1985		 *    This gives us a lowerbound on the timestamp.
1986		 *        timestamp >= other last echoed timestamp
1987		 *  - The timestamp will be less than or equal to
1988		 *    the last timestamp plus the time between the
1989		 *    last packet and now.  The RFC defines the max
1990		 *    clock rate as 1ms.  We will allow clocks to be
1991		 *    up to 10% fast and will allow a total difference
1992		 *    of 30 seconds due to a route change.  And this
1993		 *    gives us an upperbound on the timestamp.
1994		 *        timestamp <= last timestamp + max ticks
1995		 *    We have to be careful here.  Windows will send an
1996		 *    initial timestamp of zero and then initialize it
1997		 *    to a random value after the 3whs; presumably to
1998		 *    avoid a DoS by having to call an expensive RNG
1999		 *    during a SYN flood.  Proof MS has at least one
2000		 *    good security geek.
2001		 *
2002		 *  - The TCP timestamp option must also echo the other
2003		 *    endpoint's timestamp.  The timestamp echoed is the
2004		 *    one carried on the earliest unacknowledged segment
2005		 *    on the left edge of the sequence window.  The RFC
2006		 *    states that the host will reject any echoed
2007		 *    timestamps that were larger than any ever sent.
2008		 *    This gives us an upperbound on the TS echo.
2009		 *        tsecr <= largest_tsval
2010		 *  - The lowerbound on the TS echo is a little more
2011		 *    tricky to determine.  The other endpoint's echoed
2012		 *    values will not decrease.  But there may be
2013		 *    network conditions that re-order packets and
2014		 *    cause our view of them to decrease.  For now the
2015		 *    only lowerbound we can safely determine is that
2016		 *    the TS echo will never be less than the original
2017		 *    TS.  XXX There is probably a better lowerbound.
2018		 *    Remove TS_MAX_CONN with better lowerbound check.
2019		 *        tsecr >= other original TS
2020		 *
2021		 * It is also important to note that the fastest
2022		 * timestamp clock of 1ms will wrap its 32bit space in
2023		 * 24 days.  So we just disable TS checking after 24
2024		 * days of idle time.  We actually must use a 12d
2025		 * connection limit until we can come up with a better
2026		 * lowerbound to the TS echo check.
2027		 */
2028		struct timeval delta_ts;
2029		int ts_fudge;
2030
2031
2032		/*
2033		 * PFTM_TS_DIFF is how many seconds of leeway to allow
2034		 * a host's timestamp.  This can happen if the previous
2035		 * packet got delayed in transit for much longer than
2036		 * this packet.
2037		 */
2038		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
2039			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
2040
2041		/* Calculate max ticks since the last timestamp */
2042#define TS_MAXFREQ	1100		/* RFC max TS freq of 1 kHz + 10% skew */
2043#define TS_MICROSECS	1000000		/* microseconds per second */
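		/*
		 * Worked example, assuming a 30 second PFTM_TS_DIFF fudge:
		 * a peer idle for 2.5 seconds may advance its tsval by up
		 * to (2 + 30) * 1100 + 500000 / 909 ~= 35750 ticks.
		 */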
2044		delta_ts = uptime;
2045		timevalsub(&delta_ts, &src->scrub->pfss_last);
2046		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
2047		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
2048
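		/*
		 * Reject the packet on an established connection when the
		 * tsval is older than the last value the peer echoed, when
		 * it jumped past the last seen tsval plus the allowed
		 * ticks, or when a non-zero tsecr falls outside the window
		 * [peer's tsval0, peer's latest tsval].
		 */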
2049		if ((src->state >= TCPS_ESTABLISHED &&
2050		    dst->state >= TCPS_ESTABLISHED) &&
2051		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
2052		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
2053		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
2054		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
2055			/* Bad RFC1323 implementation or an insertion attack.
2056			 *
2057			 * - Solaris 2.6 and 2.7 are known to send another ACK
2058			 *   after the FIN,FIN|ACK,ACK closing that carries
2059			 *   an old timestamp.
2060			 */
2061
2062			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
2063			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
2064			    SEQ_GT(tsval, src->scrub->pfss_tsval +
2065			    tsval_from_last) ? '1' : ' ',
2066			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
2067			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
2068			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
2069			    "idle: %jus %lums\n",
2070			    tsval, tsecr, tsval_from_last,
2071			    (uintmax_t)delta_ts.tv_sec,
2072			    delta_ts.tv_usec / 1000));
2073			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
2074			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
2075			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
2076			    "\n", dst->scrub->pfss_tsval,
2077			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
2078			if (V_pf_status.debug >= PF_DEBUG_MISC) {
2079				pf_print_state(state);
2080				pf_print_flags(th->th_flags);
2081				printf("\n");
2082			}
2083			REASON_SET(reason, PFRES_TS);
2084			return (PF_DROP);
2085		}
2086
2087		/* XXX I'd really like to require tsecr but it's optional */
2088
2089	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
2090	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
2091	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
2092	    src->scrub && dst->scrub &&
2093	    (src->scrub->pfss_flags & PFSS_PAWS) &&
2094	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
2095		/* Didn't send a timestamp.  Timestamps aren't really useful
2096		 * when:
2097		 *  - connection opening or closing (often not even sent),
2098		 *    but we must not let an attacker put a FIN on a
2099		 *    data packet to sneak it through our ESTABLISHED check.
2100		 *  - on a TCP reset.  RFC suggests not even looking at TS.
2101		 *  - on an empty ACK.  The TS will not be echoed so it will
2102		 *    probably not help keep the RTT calculation in sync and
2103		 *    there isn't as much danger when the sequence numbers
2104		 *    got wrapped.  So some stacks don't include TS on empty
2105		 *    ACKs :-(
2106		 *
2107		 * To minimize the disruption to mostly RFC1323 conformant
2108		 * stacks, we will only require timestamps on data packets.
2109		 *
2110		 * And what do ya know, we cannot require timestamps on data
2111		 * packets.  There appear to be devices that do legitimate
2112		 * TCP connection hijacking.  There are HTTP devices that allow
2113		 * a 3whs (with timestamps) and then buffer the HTTP request.
2114		 * If the intermediate device has the HTTP response cache, it
2115		 * will spoof the response but not bother timestamping its
2116		 * packets.  So we can look for the presence of a timestamp in
2117		 * the first data packet and if there, require it in all future
2118		 * packets.
2119		 */
2120
2121		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
2122			/*
2123			 * Hey!  Someone tried to sneak a packet in.  Or the
2124			 * stack changed its RFC1323 behavior?!?!
2125			 */
2126			if (V_pf_status.debug >= PF_DEBUG_MISC) {
2127				DPFPRINTF(("Did not receive expected RFC1323 "
2128				    "timestamp\n"));
2129				pf_print_state(state);
2130				pf_print_flags(th->th_flags);
2131				printf("\n");
2132			}
2133			REASON_SET(reason, PFRES_TS);
2134			return (PF_DROP);
2135		}
2136	}
2137
2138
2139	/*
2140	 * We note whether a host sends its data packets with or without
2141	 * timestamps, and require all data packets to contain a timestamp
2142	 * if the first one does.  PAWS implicitly requires that all data packets be
2143	 * timestamped.  But I think there are middle-man devices that hijack
2144	 * TCP streams immediately after the 3whs and don't timestamp their
2145	 * packets (seen in a WWW accelerator or cache).
2146	 */
2147	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
2148	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
2149		if (got_ts)
2150			src->scrub->pfss_flags |= PFSS_DATA_TS;
2151		else {
2152			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
2153			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
2154			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
2155				/* Don't warn if other host rejected RFC1323 */
2156				DPFPRINTF(("Broken RFC1323 stack did not "
2157				    "timestamp data packet. Disabled PAWS "
2158				    "security.\n"));
2159				pf_print_state(state);
2160				pf_print_flags(th->th_flags);
2161				printf("\n");
2162			}
2163		}
2164	}
2165
2166
2167	/*
2168	 * Update PAWS values
2169	 */
2170	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
2171	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
2172		getmicrouptime(&src->scrub->pfss_last);
2173		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
2174		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
2175			src->scrub->pfss_tsval = tsval;
2176
2177		if (tsecr) {
2178			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
2179			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
2180				src->scrub->pfss_tsecr = tsecr;
2181
2182			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
2183			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
2184			    src->scrub->pfss_tsval0 == 0)) {
2185				/* tsval0 MUST be the lowest timestamp */
2186				src->scrub->pfss_tsval0 = tsval;
2187			}
2188
2189			/* Only fully initialized after a TS gets echoed */
2190			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
2191				src->scrub->pfss_flags |= PFSS_PAWS;
2192		}
2193	}
2194
2195	/* I have a dream....  TCP segment reassembly.... */
2196	return (0);
2197}
2198
2199static int
2200pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
2201    int off, sa_family_t af)
2202{
2203	u_int16_t	*mss;
2204	int		 thoff;
2205	int		 opt, cnt, optlen = 0;
2206	int		 rewrite = 0;
2207	u_char		 opts[TCP_MAXOLEN];
2208	u_char		*optp = opts;
2209
2210	thoff = th->th_off << 2;
2211	cnt = thoff - sizeof(struct tcphdr);
2212
2213	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
2214	    NULL, NULL, af))
2215		return (rewrite);
2216
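	/*
	 * Walk the options as kind/length pairs: EOL ends the list, NOP is
	 * a single byte, everything else carries a length octet.  An MSS
	 * option advertising more than the rule's max-mss (e.g. from a
	 * "scrub ... max-mss 1440" rule) is clamped in place with an
	 * incremental checksum fixup; the rewritten options are copied
	 * back into the mbuf below.
	 */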
2217	for (; cnt > 0; cnt -= optlen, optp += optlen) {
2218		opt = optp[0];
2219		if (opt == TCPOPT_EOL)
2220			break;
2221		if (opt == TCPOPT_NOP)
2222			optlen = 1;
2223		else {
2224			if (cnt < 2)
2225				break;
2226			optlen = optp[1];
2227			if (optlen < 2 || optlen > cnt)
2228				break;
2229		}
2230		switch (opt) {
2231		case TCPOPT_MAXSEG:
2232			mss = (u_int16_t *)(optp + 2);
2233			if ((ntohs(*mss)) > r->max_mss) {
2234				th->th_sum = pf_proto_cksum_fixup(m,
2235				    th->th_sum, *mss, htons(r->max_mss), 0);
2236				*mss = htons(r->max_mss);
2237				rewrite = 1;
2238			}
2239			break;
2240		default:
2241			break;
2242		}
2243	}
2244
2245	if (rewrite)
2246		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);
2247
2248	return (rewrite);
2249}
2250
2251#ifdef INET
2252static void
2253pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
2254{
2255	struct mbuf		*m = *m0;
2256	struct ip		*h = mtod(m, struct ip *);
2257
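	/*
	 * Handlers for the no-df, min-ttl, set-tos and random-id scrub
	 * options.  Each change records the old and new value of the
	 * touched field and patches ip_sum via pf_cksum_fixup() instead of
	 * recomputing the whole IPv4 header checksum.
	 */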
2258	/* Clear IP_DF if no-df was requested */
2259	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
2260		u_int16_t ip_off = h->ip_off;
2261
2262		h->ip_off &= htons(~IP_DF);
2263		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
2264	}
2265
2266	/* Enforce a minimum ttl, may cause endless packet loops */
2267	/* Enforce a minimum TTL; this may cause endless packet loops */
2268		u_int16_t ip_ttl = h->ip_ttl;
2269
2270		h->ip_ttl = min_ttl;
2271		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
2272	}
2273
2274	/* Enforce tos */
2275	if (flags & PFRULE_SET_TOS) {
2276		u_int16_t	ov, nv;
2277
2278		ov = *(u_int16_t *)h;
2279		h->ip_tos = tos;
2280		nv = *(u_int16_t *)h;
2281
2282		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2283	}
2284
2285	/* random-id, but not for fragments */
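	/* (ip_off with IP_DF masked out is non-zero for any fragment,
	 * since MF or a non-zero fragment offset would then be set.) */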
2286	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2287		u_int16_t ip_id = h->ip_id;
2288
2289		h->ip_id = ip_randomid();
2290		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2291	}
2292}
2293#endif /* INET */
2294
2295#ifdef INET6
2296static void
2297pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
2298{
2299	struct mbuf		*m = *m0;
2300	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
2301
2302	/* Enforce a minimum ttl, may cause endless packet loops */
2303	/* Enforce a minimum TTL; this may cause endless packet loops */
2304		h->ip6_hlim = min_ttl;
2305}
2306#endif
2307