1/*-
2 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
3 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/netpfil/pf/pf_norm.c 317333 2017-04-23 08:58:50Z kp $");
31
32#include "opt_inet.h"
33#include "opt_inet6.h"
34#include "opt_pf.h"
35
36#include <sys/param.h>
37#include <sys/kernel.h>
38#include <sys/lock.h>
39#include <sys/mbuf.h>
40#include <sys/mutex.h>
41#include <sys/refcount.h>
42#include <sys/rwlock.h>
43#include <sys/socket.h>
44
45#include <net/if.h>
46#include <net/vnet.h>
47#include <net/pfvar.h>
48#include <net/if_pflog.h>
49
50#include <netinet/in.h>
51#include <netinet/ip.h>
52#include <netinet/ip_var.h>
53#include <netinet6/ip6_var.h>
54#include <netinet/tcp.h>
55#include <netinet/tcp_fsm.h>
56#include <netinet/tcp_seq.h>
57
58#ifdef INET6
59#include <netinet/ip6.h>
60#endif /* INET6 */
61
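/*
 * A pf_frent describes one buffered fragment.  fe_off and fe_len are byte
 * counts: both callers convert the wire fragment offset (13 bits, in units
 * of 8 bytes) into bytes before queueing, so an offset field of 185, for
 * example, becomes fe_off = 185 * 8 = 1480.
 */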
62struct pf_frent {
63	TAILQ_ENTRY(pf_frent)	fr_next;
64	struct mbuf	*fe_m;
65	uint16_t	fe_hdrlen;	/* ipv4: header length incl. options;
66					   ipv6: hdr + ext + frag headers */
67	uint16_t	fe_extoff;	/* last extension header offset or 0 */
68	uint16_t	fe_len;		/* fragment length */
69	uint16_t	fe_off;		/* fragment offset */
70	uint16_t	fe_mff;		/* more fragment flag */
71};
72
73struct pf_fragment_cmp {
74	struct pf_addr	frc_src;
75	struct pf_addr	frc_dst;
76	uint32_t	frc_id;
77	sa_family_t	frc_af;
78	uint8_t		frc_proto;
79};
80
81struct pf_fragment {
82	struct pf_fragment_cmp	fr_key;
83#define fr_src	fr_key.frc_src
84#define fr_dst	fr_key.frc_dst
85#define fr_id	fr_key.frc_id
86#define fr_af	fr_key.frc_af
87#define fr_proto	fr_key.frc_proto
88
89	RB_ENTRY(pf_fragment) fr_entry;
90	TAILQ_ENTRY(pf_fragment) frag_next;
91	uint32_t	fr_timeout;
92	uint16_t	fr_maxlen;	/* maximum length of single fragment */
93	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
94};
95
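/*
 * Carried from pf_reassemble6() to pf_refragment6() in a PF_REASSEMBLED
 * mbuf tag, so that a packet reassembled on input can be refragmented,
 * using pieces no larger than the largest fragment originally seen,
 * before it is forwarded.
 */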
96struct pf_fragment_tag {
97	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
98	uint16_t	ft_extoff;	/* last extension header offset or 0 */
99	uint16_t	ft_maxlen;	/* maximum fragment payload length */
100	uint32_t	ft_id;		/* fragment id */
101};
102
103static struct mtx pf_frag_mtx;
104MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
105#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
106#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
107#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)
108
109VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */
110
111static VNET_DEFINE(uma_zone_t, pf_frent_z);
112#define	V_pf_frent_z	VNET(pf_frent_z)
113static VNET_DEFINE(uma_zone_t, pf_frag_z);
114#define	V_pf_frag_z	VNET(pf_frag_z)
115
116TAILQ_HEAD(pf_fragqueue, pf_fragment);
117TAILQ_HEAD(pf_cachequeue, pf_fragment);
118static VNET_DEFINE(struct pf_fragqueue,	pf_fragqueue);
119#define	V_pf_fragqueue			VNET(pf_fragqueue)
120RB_HEAD(pf_frag_tree, pf_fragment);
121static VNET_DEFINE(struct pf_frag_tree,	pf_frag_tree);
122#define	V_pf_frag_tree			VNET(pf_frag_tree)
123static int		 pf_frag_compare(struct pf_fragment *,
124			    struct pf_fragment *);
125static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
126static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
127
128static void	pf_flush_fragments(void);
129static void	pf_free_fragment(struct pf_fragment *);
130static void	pf_remove_fragment(struct pf_fragment *);
131static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
132		    struct tcphdr *, int, sa_family_t);
133static struct pf_frent *pf_create_fragment(u_short *);
134static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
135		    struct pf_frag_tree *tree);
136static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
137		    struct pf_frent *, u_short *);
138static int	pf_isfull_fragment(struct pf_fragment *);
139static struct mbuf *pf_join_fragment(struct pf_fragment *);
140#ifdef INET
141static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
142static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
143#endif	/* INET */
144#ifdef INET6
145static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
146		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
147static void	pf_scrub_ip6(struct mbuf **, uint8_t);
148#endif	/* INET6 */
149
150#define	DPFPRINTF(x) do {				\
151	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
152		printf("%s: ", __func__);		\
153		printf x ;				\
154	}						\
155} while(0)
156
157#ifdef INET
158static void
159pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
160{
161
162	key->frc_src.v4 = ip->ip_src;
163	key->frc_dst.v4 = ip->ip_dst;
164	key->frc_af = AF_INET;
165	key->frc_proto = ip->ip_p;
166	key->frc_id = ip->ip_id;
167}
168#endif	/* INET */
169
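/*
 * Per-VNET initialization: create the UMA zones for reassembly queues,
 * fragment entries and TCP state scrub records, and cap the fragment entry
 * zone at PFFRAG_FRENT_HIWAT so that a fragment flood cannot exhaust
 * kernel memory.
 */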
170void
171pf_normalize_init(void)
172{
173
174	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
175	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
176	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
177	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
178	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
179	    sizeof(struct pf_state_scrub),  NULL, NULL, NULL, NULL,
180	    UMA_ALIGN_PTR, 0);
181
182	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
183	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
184	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
185	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
186
187	TAILQ_INIT(&V_pf_fragqueue);
188}
189
190void
191pf_normalize_cleanup(void)
192{
193
194	uma_zdestroy(V_pf_state_scrub_z);
195	uma_zdestroy(V_pf_frent_z);
196	uma_zdestroy(V_pf_frag_z);
197}
198
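/*
 * Red-black tree comparison for the fragment tree: order by fragment id,
 * protocol, address family and finally the addresses, so that a lookup
 * with a pf_fragment_cmp key finds the matching reassembly queue.
 */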
199static int
200pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
201{
202	int	diff;
203
204	if ((diff = a->fr_id - b->fr_id) != 0)
205		return (diff);
206	if ((diff = a->fr_proto - b->fr_proto) != 0)
207		return (diff);
208	if ((diff = a->fr_af - b->fr_af) != 0)
209		return (diff);
210	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
211		return (diff);
212	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
213		return (diff);
214	return (0);
215}
216
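/*
 * Expire reassembly queues that have been idle longer than the fragment
 * timeout.  V_pf_fragqueue is kept in LRU order (lookups and insertions
 * move entries to the head), so the scan can stop at the first entry that
 * is still fresh.
 */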
217void
218pf_purge_expired_fragments(void)
219{
220	struct pf_fragment	*frag;
221	u_int32_t		 expire = time_uptime -
222				    V_pf_default_rule.timeout[PFTM_FRAG];
223
224	PF_FRAG_LOCK();
225	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
226		if (frag->fr_timeout > expire)
227			break;
228
229		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
230		pf_free_fragment(frag);
231	}
232
233	PF_FRAG_UNLOCK();
234}
235
236/*
237 * Try to flush old fragments to make space for new ones
238 */
239static void
240pf_flush_fragments(void)
241{
242	struct pf_fragment	*frag;
243	int			 goal;
244
245	PF_FRAG_ASSERT();
246
247	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
248	DPFPRINTF(("trying to free %d frag entries\n", goal));
249	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
250		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
251		if (frag)
252			pf_free_fragment(frag);
253		else
254			break;
255	}
256}
257
258/* Frees the fragments and all associated entries */
259static void
260pf_free_fragment(struct pf_fragment *frag)
261{
262	struct pf_frent		*frent;
263
264	PF_FRAG_ASSERT();
265
266	/* Free all fragments */
267	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
268	    frent = TAILQ_FIRST(&frag->fr_queue)) {
269		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
270
271		m_freem(frent->fe_m);
272		uma_zfree(V_pf_frent_z, frent);
273	}
274
275	pf_remove_fragment(frag);
276}
277
278static struct pf_fragment *
279pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
280{
281	struct pf_fragment	*frag;
282
283	PF_FRAG_ASSERT();
284
285	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
286	if (frag != NULL) {
287		/* XXX Are we sure we want to update the timeout? */
288		frag->fr_timeout = time_uptime;
289		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
290		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
291	}
292
293	return (frag);
294}
295
296/* Removes a fragment from the fragment queue and frees the fragment */
297static void
298pf_remove_fragment(struct pf_fragment *frag)
299{
300
301	PF_FRAG_ASSERT();
302
303	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
304	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
305	uma_zfree(V_pf_frag_z, frag);
306}
307
308static struct pf_frent *
309pf_create_fragment(u_short *reason)
310{
311	struct pf_frent *frent;
312
313	PF_FRAG_ASSERT();
314
315	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
316	if (frent == NULL) {
317		pf_flush_fragments();
318		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
319		if (frent == NULL) {
320			REASON_SET(reason, PFRES_MEMORY);
321			return (NULL);
322		}
323	}
324
325	return (frent);
326}
327
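/*
 * Queue a fragment for reassembly, creating the reassembly queue on first
 * use.  Malformed fragments (empty, length not a multiple of 8 while MF is
 * set, or extending beyond IP_MAXPACKET) are rejected.  On overlap, the
 * new fragment is trimmed at its head where it overlaps its predecessor,
 * while already queued fragments that it overlaps are trimmed at their
 * head or dropped entirely in favour of the new data.
 */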
328static struct pf_fragment *
329pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
330		u_short *reason)
331{
332	struct pf_frent		*after, *next, *prev;
333	struct pf_fragment	*frag;
334	uint16_t		total;
335
336	PF_FRAG_ASSERT();
337
338	/* No empty fragments. */
339	if (frent->fe_len == 0) {
340		DPFPRINTF(("bad fragment: len 0"));
341		goto bad_fragment;
342	}
343
344	/* All fragments are 8 byte aligned. */
345	if (frent->fe_mff && (frent->fe_len & 0x7)) {
346		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
347		goto bad_fragment;
348	}
349
350	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
351	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
352		DPFPRINTF(("bad fragment: max packet %d",
353		    frent->fe_off + frent->fe_len));
354		goto bad_fragment;
355	}
356
357	DPFPRINTF((key->frc_af == AF_INET ?
358	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
359	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));
360
361	/* Fully buffer all of the fragments in this fragment queue. */
362	frag = pf_find_fragment(key, &V_pf_frag_tree);
363
364	/* Create a new reassembly queue for this packet. */
365	if (frag == NULL) {
366		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
367		if (frag == NULL) {
368			pf_flush_fragments();
369			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
370			if (frag == NULL) {
371				REASON_SET(reason, PFRES_MEMORY);
372				goto drop_fragment;
373			}
374		}
375
376		*(struct pf_fragment_cmp *)frag = *key;
377		frag->fr_timeout = time_uptime;
378		frag->fr_maxlen = frent->fe_len;
379		TAILQ_INIT(&frag->fr_queue);
380
381		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
382		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
383
384		/* We do not have a previous fragment. */
385		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
386
387		return (frag);
388	}
389
390	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY(&frag->fr_queue)"));
391
392	/* Remember maximum fragment len for refragmentation. */
393	if (frent->fe_len > frag->fr_maxlen)
394		frag->fr_maxlen = frent->fe_len;
395
396	/* Maximum data we have seen already. */
397	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
398		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
399
400	/* Non-terminal fragments must have the more-fragments flag set. */
401	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
402		goto bad_fragment;
403
404	/* Check if we saw the last fragment already. */
405	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
406		if (frent->fe_off + frent->fe_len > total ||
407		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
408			goto bad_fragment;
409	} else {
410		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
411			goto bad_fragment;
412	}
413
414	/* Find a fragment after the current one. */
415	prev = NULL;
416	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
417		if (after->fe_off > frent->fe_off)
418			break;
419		prev = after;
420	}
421
422	KASSERT(prev != NULL || after != NULL,
423	    ("prev != NULL || after != NULL"));
424
425	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
426		uint16_t precut;
427
428		precut = prev->fe_off + prev->fe_len - frent->fe_off;
429		if (precut >= frent->fe_len)
430			goto bad_fragment;
431		DPFPRINTF(("overlap -%d", precut));
432		m_adj(frent->fe_m, precut);
433		frent->fe_off += precut;
434		frent->fe_len -= precut;
435	}
436
437	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
438	    after = next) {
439		uint16_t aftercut;
440
441		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
442		DPFPRINTF(("adjust overlap %d", aftercut));
443		if (aftercut < after->fe_len) {
444			m_adj(after->fe_m, aftercut);
445			after->fe_off += aftercut;
446			after->fe_len -= aftercut;
447			break;
448		}
449
450		/* This fragment is completely overlapped, lose it. */
451		next = TAILQ_NEXT(after, fr_next);
452		m_freem(after->fe_m);
453		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
454		uma_zfree(V_pf_frent_z, after);
455	}
456
457	if (prev == NULL)
458		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
459	else
460		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
461
462	return (frag);
463
464bad_fragment:
465	REASON_SET(reason, PFRES_FRAG);
466drop_fragment:
467	uma_zfree(V_pf_frent_z, frent);
468	return (NULL);
469}
470
471static int
472pf_isfull_fragment(struct pf_fragment *frag)
473{
474	struct pf_frent	*frent, *next;
475	uint16_t off, total;
476
477	/* Check if we are completely reassembled */
478	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
479		return (0);
480
481	/* Maximum data we have seen already */
482	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
483		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
484
485	/* Check if we have all the data */
486	off = 0;
487	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
488		next = TAILQ_NEXT(frent, fr_next);
489
490		off += frent->fe_len;
491		if (off < total && (next == NULL || next->fe_off != off)) {
492			DPFPRINTF(("missing fragment at %d, next %d, total %d",
493			    off, next == NULL ? -1 : next->fe_off, total));
494			return (0);
495		}
496	}
497	DPFPRINTF(("%d < %d?", off, total));
498	if (off < total)
499		return (0);
500	KASSERT(off == total, ("off == total"));
501
502	return (1);
503}
504
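/*
 * Concatenate a completed fragment queue into a single mbuf chain.  The
 * first fragment keeps its header; every later fragment has its header and
 * any trailing bytes trimmed off before its payload is appended with
 * m_cat().  The pf_fragment and its entries are freed.
 */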
505static struct mbuf *
506pf_join_fragment(struct pf_fragment *frag)
507{
508	struct mbuf *m, *m2;
509	struct pf_frent	*frent, *next;
510
511	frent = TAILQ_FIRST(&frag->fr_queue);
512	next = TAILQ_NEXT(frent, fr_next);
513
514	m = frent->fe_m;
515	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
516	uma_zfree(V_pf_frent_z, frent);
517	for (frent = next; frent != NULL; frent = next) {
518		next = TAILQ_NEXT(frent, fr_next);
519
520		m2 = frent->fe_m;
521		/* Strip off ip header. */
522		m_adj(m2, frent->fe_hdrlen);
523		/* Strip off any trailing bytes. */
524		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
525
526		uma_zfree(V_pf_frent_z, frent);
527		m_cat(m, m2);
528	}
529
530	/* Remove from fragment queue. */
531	pf_remove_fragment(frag);
532
533	return (m);
534}
535
536#ifdef INET
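/*
 * Reassemble an IPv4 fragment.  Returns PF_DROP on error, PF_PASS with
 * *m0 set to NULL when the fragment was queued (the caller must stop
 * processing the packet), and PF_PASS with the completed packet in *m0,
 * with ip_len and ip_off rewritten, once the last piece arrives.  Called
 * with pf_frag_mtx held.
 */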
537static int
538pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
539{
540	struct mbuf		*m = *m0;
541	struct pf_frent		*frent;
542	struct pf_fragment	*frag;
543	struct pf_fragment_cmp	key;
544	uint16_t		total, hdrlen;
545
546	/* Get an entry for the fragment queue */
547	if ((frent = pf_create_fragment(reason)) == NULL)
548		return (PF_DROP);
549
550	frent->fe_m = m;
551	frent->fe_hdrlen = ip->ip_hl << 2;
552	frent->fe_extoff = 0;
553	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
554	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
555	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
556
557	pf_ip2key(ip, dir, &key);
558
559	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
560		return (PF_DROP);
561
562	/* The mbuf is part of the fragment entry, no direct free or access */
563	m = *m0 = NULL;
564
565	if (!pf_isfull_fragment(frag))
566		return (PF_PASS);  /* drop because *m0 is NULL, no error */
567
568	/* We have all the data */
569	frent = TAILQ_FIRST(&frag->fr_queue);
570	KASSERT(frent != NULL, ("frent != NULL"));
571	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
572		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
573	hdrlen = frent->fe_hdrlen;
574
575	m = *m0 = pf_join_fragment(frag);
576	frag = NULL;
577
578	if (m->m_flags & M_PKTHDR) {
579		int plen = 0;
580		for (m = *m0; m; m = m->m_next)
581			plen += m->m_len;
582		m = *m0;
583		m->m_pkthdr.len = plen;
584	}
585
586	ip = mtod(m, struct ip *);
587	ip->ip_len = htons(hdrlen + total);
588	ip->ip_off &= ~(IP_MF|IP_OFFMASK);
589
590	if (hdrlen + total > IP_MAXPACKET) {
591		DPFPRINTF(("drop: too big: %d", total));
592		ip->ip_len = 0;
593		REASON_SET(reason, PFRES_SHORT);
594		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
595		return (PF_DROP);
596	}
597
598	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
599	return (PF_PASS);
600}
601#endif	/* INET */
602
603#ifdef INET6
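/*
 * Reassemble an IPv6 fragment.  Unlike the IPv4 path, this takes
 * pf_frag_mtx itself.  When the packet is complete, the fragment header is
 * removed, the payload length and next-header chain are fixed up, and a
 * PF_REASSEMBLED mbuf tag is attached so that pf_refragment6() can
 * refragment the packet on output.
 */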
604static int
605pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
606    uint16_t hdrlen, uint16_t extoff, u_short *reason)
607{
608	struct mbuf		*m = *m0;
609	struct pf_frent		*frent;
610	struct pf_fragment	*frag;
611	struct pf_fragment_cmp	 key;
612	struct m_tag		*mtag;
613	struct pf_fragment_tag	*ftag;
614	int			 off;
615	uint32_t		 frag_id;
616	uint16_t		 total, maxlen;
617	uint8_t			 proto;
618
619	PF_FRAG_LOCK();
620
621	/* Get an entry for the fragment queue. */
622	if ((frent = pf_create_fragment(reason)) == NULL) {
623		PF_FRAG_UNLOCK();
624		return (PF_DROP);
625	}
626
627	frent->fe_m = m;
628	frent->fe_hdrlen = hdrlen;
629	frent->fe_extoff = extoff;
630	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
631	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
632	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
633
634	key.frc_src.v6 = ip6->ip6_src;
635	key.frc_dst.v6 = ip6->ip6_dst;
636	key.frc_af = AF_INET6;
637	/* Only the first fragment's protocol is relevant. */
638	key.frc_proto = 0;
639	key.frc_id = fraghdr->ip6f_ident;
640
641	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
642		PF_FRAG_UNLOCK();
643		return (PF_DROP);
644	}
645
646	/* The mbuf is part of the fragment entry, no direct free or access. */
647	m = *m0 = NULL;
648
649	if (!pf_isfull_fragment(frag)) {
650		PF_FRAG_UNLOCK();
651		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
652	}
653
654	/* We have all the data. */
655	extoff = frent->fe_extoff;
656	maxlen = frag->fr_maxlen;
657	frag_id = frag->fr_id;
658	frent = TAILQ_FIRST(&frag->fr_queue);
659	KASSERT(frent != NULL, ("frent != NULL"));
660	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
661		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
662	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
663
664	m = *m0 = pf_join_fragment(frag);
665	frag = NULL;
666
667	PF_FRAG_UNLOCK();
668
669	/* Take protocol from first fragment header. */
670	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
671	KASSERT(m, ("%s: short mbuf chain", __func__));
672	proto = *(mtod(m, caddr_t) + off);
673	m = *m0;
674
675	/* Delete frag6 header */
676	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
677		goto fail;
678
679	if (m->m_flags & M_PKTHDR) {
680		int plen = 0;
681		for (m = *m0; m; m = m->m_next)
682			plen += m->m_len;
683		m = *m0;
684		m->m_pkthdr.len = plen;
685	}
686
687	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
688	    M_NOWAIT)) == NULL)
689		goto fail;
690	ftag = (struct pf_fragment_tag *)(mtag + 1);
691	ftag->ft_hdrlen = hdrlen;
692	ftag->ft_extoff = extoff;
693	ftag->ft_maxlen = maxlen;
694	ftag->ft_id = frag_id;
695	m_tag_prepend(m, mtag);
696
697	ip6 = mtod(m, struct ip6_hdr *);
698	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
699	if (extoff) {
700		/* Write protocol into next field of last extension header. */
701		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
702		    &off);
703		KASSERT(m, ("%s: short mbuf chain", __func__));
704		*(mtod(m, char *) + off) = proto;
705		m = *m0;
706	} else
707		ip6->ip6_nxt = proto;
708
709	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
710		DPFPRINTF(("drop: too big: %d", total));
711		ip6->ip6_plen = 0;
712		REASON_SET(reason, PFRES_SHORT);
713		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
714		return (PF_DROP);
715	}
716
717	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
718	return (PF_PASS);
719
720fail:
721	REASON_SET(reason, PFRES_MEMORY);
722	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
723	return (PF_DROP);
724}
725#endif	/* INET6 */
726
727#ifdef INET6
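/*
 * Refragment a packet that was reassembled on input: restore the fragment
 * header in the next-header chain, let ip6_fragment() cut the payload into
 * pieces no larger than the largest original fragment (rounded down to a
 * multiple of 8), and forward each piece.
 */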
728int
729pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
730{
731	struct mbuf		*m = *m0, *t;
732	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
733	struct pf_pdesc		 pd;
734	uint32_t		 frag_id;
735	uint16_t		 hdrlen, extoff, maxlen;
736	uint8_t			 proto;
737	int			 error, action;
738
739	hdrlen = ftag->ft_hdrlen;
740	extoff = ftag->ft_extoff;
741	maxlen = ftag->ft_maxlen;
742	frag_id = ftag->ft_id;
743	m_tag_delete(m, mtag);
744	mtag = NULL;
745	ftag = NULL;
746
747	if (extoff) {
748		int off;
749
750		/* Use protocol from next field of last extension header */
751		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
752		    &off);
753		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
754		proto = *(mtod(m, caddr_t) + off);
755		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
756		m = *m0;
757	} else {
758		struct ip6_hdr *hdr;
759
760		hdr = mtod(m, struct ip6_hdr *);
761		proto = hdr->ip6_nxt;
762		hdr->ip6_nxt = IPPROTO_FRAGMENT;
763	}
764
765	/* The MTU must be a multiple of 8 bytes, or we risk doing the
766	 * fragmentation wrong. */
767	maxlen = maxlen & ~7;
768
769	/*
770	 * Maxlen may be less than 8 if there was only a single
771	 * fragment.  As it was fragmented before, add a fragment
772	 * header also for a single fragment.  If total or maxlen
773	 * is less than 8, ip6_fragment() will return EMSGSIZE and
774	 * we drop the packet.
775	 */
776	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
777	m = (*m0)->m_nextpkt;
778	(*m0)->m_nextpkt = NULL;
779	if (error == 0) {
780		/* The first mbuf contains the unfragmented packet. */
781		m_freem(*m0);
782		*m0 = NULL;
783		action = PF_PASS;
784	} else {
785		/* Drop expects an mbuf to free. */
786		DPFPRINTF(("refragment error %d", error));
787		action = PF_DROP;
788	}
789	for (t = m; m; m = t) {
790		t = m->m_nextpkt;
791		m->m_nextpkt = NULL;
792		m->m_flags |= M_SKIP_FIREWALL;
793		memset(&pd, 0, sizeof(pd));
794		pd.pf_mtag = pf_find_mtag(m);
795		if (error == 0)
796			ip6_forward(m, 0);
797		else
798			m_freem(m);
799	}
800
801	return (action);
802}
803#endif /* INET6 */
804
805#ifdef INET
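/*
 * IPv4 scrub entry point: find the first matching scrub rule, sanity check
 * the header, clear DF if the rule says no-df, reassemble fragments, and
 * apply the rule's min-ttl, set-tos and random-id options.
 */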
806int
807pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
808    struct pf_pdesc *pd)
809{
810	struct mbuf		*m = *m0;
811	struct pf_rule		*r;
812	struct ip		*h = mtod(m, struct ip *);
813	int			 mff = (ntohs(h->ip_off) & IP_MF);
814	int			 hlen = h->ip_hl << 2;
815	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
816	u_int16_t		 max;
817	int			 ip_len;
818	int			 ip_off;
819	int			 tag = -1;
820	int			 verdict;
821
822	PF_RULES_RASSERT();
823
824	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
825	while (r != NULL) {
826		r->evaluations++;
827		if (pfi_kif_match(r->kif, kif) == r->ifnot)
828			r = r->skip[PF_SKIP_IFP].ptr;
829		else if (r->direction && r->direction != dir)
830			r = r->skip[PF_SKIP_DIR].ptr;
831		else if (r->af && r->af != AF_INET)
832			r = r->skip[PF_SKIP_AF].ptr;
833		else if (r->proto && r->proto != h->ip_p)
834			r = r->skip[PF_SKIP_PROTO].ptr;
835		else if (PF_MISMATCHAW(&r->src.addr,
836		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
837		    r->src.neg, kif, M_GETFIB(m)))
838			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
839		else if (PF_MISMATCHAW(&r->dst.addr,
840		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
841		    r->dst.neg, NULL, M_GETFIB(m)))
842			r = r->skip[PF_SKIP_DST_ADDR].ptr;
843		else if (r->match_tag && !pf_match_tag(m, r, &tag,
844		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
845			r = TAILQ_NEXT(r, entries);
846		else
847			break;
848	}
849
850	if (r == NULL || r->action == PF_NOSCRUB)
851		return (PF_PASS);
852	else {
853		r->packets[dir == PF_OUT]++;
854		r->bytes[dir == PF_OUT] += pd->tot_len;
855	}
856
857	/* Check for illegal packets */
858	if (hlen < (int)sizeof(struct ip)) {
859		REASON_SET(reason, PFRES_NORM);
860		goto drop;
861	}
862
863	if (hlen > ntohs(h->ip_len)) {
864		REASON_SET(reason, PFRES_NORM);
865		goto drop;
866	}
867
868	/* Clear IP_DF if the rule uses the no-df option */
869	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
870		u_int16_t ip_off = h->ip_off;
871
872		h->ip_off &= htons(~IP_DF);
873		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
874	}
875
876	/* We will need other tests here */
877	if (!fragoff && !mff)
878		goto no_fragment;
879
880	/* We're dealing with a fragment now. Don't allow fragments
881	 * with IP_DF to be reassembled. If the flag was cleared by
882	 * no-df above, fine. Otherwise drop it.
883	 */
884	if (h->ip_off & htons(IP_DF)) {
885		DPFPRINTF(("IP_DF\n"));
886		goto bad;
887	}
888
889	ip_len = ntohs(h->ip_len) - hlen;
890	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
891
892	/* All fragments are 8 byte aligned */
893	if (mff && (ip_len & 0x7)) {
894		DPFPRINTF(("mff and %d\n", ip_len));
895		goto bad;
896	}
897
898	/* Respect maximum length */
899	if (fragoff + ip_len > IP_MAXPACKET) {
900		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
901		goto bad;
902	}
903	max = fragoff + ip_len;
904
905	/* Fully buffer all of the fragments
906	 * Might return a completely reassembled mbuf, or NULL */
907	PF_FRAG_LOCK();
908	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
909	verdict = pf_reassemble(m0, h, dir, reason);
910	PF_FRAG_UNLOCK();
911
912	if (verdict != PF_PASS)
913		return (PF_DROP);
914
915	m = *m0;
916	if (m == NULL)
917		return (PF_DROP);
918
919	h = mtod(m, struct ip *);
920
921 no_fragment:
922	/* At this point, only IP_DF is allowed in ip_off */
923	if (h->ip_off & ~htons(IP_DF)) {
924		u_int16_t ip_off = h->ip_off;
925
926		h->ip_off &= htons(IP_DF);
927		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
928	}
929
930	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);
931
932	return (PF_PASS);
933
934 bad:
935	DPFPRINTF(("dropping bad fragment\n"));
936	REASON_SET(reason, PFRES_FRAG);
937 drop:
938	if (r != NULL && r->log)
939		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
940		    1);
941
942	return (PF_DROP);
943}
944#endif
945
946#ifdef INET6
947int
948pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
949    u_short *reason, struct pf_pdesc *pd)
950{
951	struct mbuf		*m = *m0;
952	struct pf_rule		*r;
953	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
954	int			 extoff;
955	int			 off;
956	struct ip6_ext		 ext;
957	struct ip6_opt		 opt;
958	struct ip6_opt_jumbo	 jumbo;
959	struct ip6_frag		 frag;
960	u_int32_t		 jumbolen = 0, plen;
961	int			 optend;
962	int			 ooff;
963	u_int8_t		 proto;
964	int			 terminal;
965
966	PF_RULES_RASSERT();
967
968	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
969	while (r != NULL) {
970		r->evaluations++;
971		if (pfi_kif_match(r->kif, kif) == r->ifnot)
972			r = r->skip[PF_SKIP_IFP].ptr;
973		else if (r->direction && r->direction != dir)
974			r = r->skip[PF_SKIP_DIR].ptr;
975		else if (r->af && r->af != AF_INET6)
976			r = r->skip[PF_SKIP_AF].ptr;
977#if 0 /* header chain! */
978		else if (r->proto && r->proto != h->ip6_nxt)
979			r = r->skip[PF_SKIP_PROTO].ptr;
980#endif
981		else if (PF_MISMATCHAW(&r->src.addr,
982		    (struct pf_addr *)&h->ip6_src, AF_INET6,
983		    r->src.neg, kif, M_GETFIB(m)))
984			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
985		else if (PF_MISMATCHAW(&r->dst.addr,
986		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
987		    r->dst.neg, NULL, M_GETFIB(m)))
988			r = r->skip[PF_SKIP_DST_ADDR].ptr;
989		else
990			break;
991	}
992
993	if (r == NULL || r->action == PF_NOSCRUB)
994		return (PF_PASS);
995	else {
996		r->packets[dir == PF_OUT]++;
997		r->bytes[dir == PF_OUT] += pd->tot_len;
998	}
999
1000	/* Check for illegal packets */
1001	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1002		goto drop;
1003
1004	extoff = 0;
1005	off = sizeof(struct ip6_hdr);
1006	proto = h->ip6_nxt;
1007	terminal = 0;
1008	do {
1009		switch (proto) {
1010		case IPPROTO_FRAGMENT:
1011			goto fragment;
1012			break;
1013		case IPPROTO_AH:
1014		case IPPROTO_ROUTING:
1015		case IPPROTO_DSTOPTS:
1016			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1017			    NULL, AF_INET6))
1018				goto shortpkt;
1019			extoff = off;
1020			if (proto == IPPROTO_AH)
1021				off += (ext.ip6e_len + 2) * 4;
1022			else
1023				off += (ext.ip6e_len + 1) * 8;
1024			proto = ext.ip6e_nxt;
1025			break;
1026		case IPPROTO_HOPOPTS:
1027			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1028			    NULL, AF_INET6))
1029				goto shortpkt;
1030			extoff = off;
1031			optend = off + (ext.ip6e_len + 1) * 8;
1032			ooff = off + sizeof(ext);
1033			do {
1034				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1035				    sizeof(opt.ip6o_type), NULL, NULL,
1036				    AF_INET6))
1037					goto shortpkt;
1038				if (opt.ip6o_type == IP6OPT_PAD1) {
1039					ooff++;
1040					continue;
1041				}
1042				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1043				    NULL, NULL, AF_INET6))
1044					goto shortpkt;
1045				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1046					goto drop;
1047				switch (opt.ip6o_type) {
1048				case IP6OPT_JUMBO:
1049					if (h->ip6_plen != 0)
1050						goto drop;
1051					if (!pf_pull_hdr(m, ooff, &jumbo,
1052					    sizeof(jumbo), NULL, NULL,
1053					    AF_INET6))
1054						goto shortpkt;
1055					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
1056					    sizeof(jumbolen));
1057					jumbolen = ntohl(jumbolen);
1058					if (jumbolen <= IPV6_MAXPACKET)
1059						goto drop;
1060					if (sizeof(struct ip6_hdr) + jumbolen !=
1061					    m->m_pkthdr.len)
1062						goto drop;
1063					break;
1064				default:
1065					break;
1066				}
1067				ooff += sizeof(opt) + opt.ip6o_len;
1068			} while (ooff < optend);
1069
1070			off = optend;
1071			proto = ext.ip6e_nxt;
1072			break;
1073		default:
1074			terminal = 1;
1075			break;
1076		}
1077	} while (!terminal);
1078
1079	/* jumbo payload option must be present, or plen > 0 */
1080	if (ntohs(h->ip6_plen) == 0)
1081		plen = jumbolen;
1082	else
1083		plen = ntohs(h->ip6_plen);
1084	if (plen == 0)
1085		goto drop;
1086	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1087		goto shortpkt;
1088
1089	pf_scrub_ip6(&m, r->min_ttl);
1090
1091	return (PF_PASS);
1092
1093 fragment:
1094	/* Jumbo payload packets cannot be fragmented. */
1095	plen = ntohs(h->ip6_plen);
1096	if (plen == 0 || jumbolen)
1097		goto drop;
1098	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1099		goto shortpkt;
1100
1101	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1102		goto shortpkt;
1103
1104	/* Offset now points to data portion. */
1105	off += sizeof(frag);
1106
1107	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
1108	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
1109		return (PF_DROP);
1110	m = *m0;
1111	if (m == NULL)
1112		return (PF_DROP);
1113
1114	pd->flags |= PFDESC_IP_REAS;
1115	return (PF_PASS);
1116
1117 shortpkt:
1118	REASON_SET(reason, PFRES_SHORT);
1119	if (r != NULL && r->log)
1120		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
1121		    1);
1122	return (PF_DROP);
1123
1124 drop:
1125	REASON_SET(reason, PFRES_NORM);
1126	if (r != NULL && r->log)
1127		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
1128		    1);
1129	return (PF_DROP);
1130}
1131#endif /* INET6 */
1132
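/*
 * Stateless TCP scrubbing: drop packets with impossible flag combinations
 * or a too-small data offset, clear the reserved header bits, zero the
 * urgent pointer when URG is not set, and clamp the MSS option if the
 * matching rule requests it.
 */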
1133int
1134pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
1135    int off, void *h, struct pf_pdesc *pd)
1136{
1137	struct pf_rule	*r, *rm = NULL;
1138	struct tcphdr	*th = pd->hdr.tcp;
1139	int		 rewrite = 0;
1140	u_short		 reason;
1141	u_int8_t	 flags;
1142	sa_family_t	 af = pd->af;
1143
1144	PF_RULES_RASSERT();
1145
1146	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1147	while (r != NULL) {
1148		r->evaluations++;
1149		if (pfi_kif_match(r->kif, kif) == r->ifnot)
1150			r = r->skip[PF_SKIP_IFP].ptr;
1151		else if (r->direction && r->direction != dir)
1152			r = r->skip[PF_SKIP_DIR].ptr;
1153		else if (r->af && r->af != af)
1154			r = r->skip[PF_SKIP_AF].ptr;
1155		else if (r->proto && r->proto != pd->proto)
1156			r = r->skip[PF_SKIP_PROTO].ptr;
1157		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1158		    r->src.neg, kif, M_GETFIB(m)))
1159			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1160		else if (r->src.port_op && !pf_match_port(r->src.port_op,
1161			    r->src.port[0], r->src.port[1], th->th_sport))
1162			r = r->skip[PF_SKIP_SRC_PORT].ptr;
1163		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1164		    r->dst.neg, NULL, M_GETFIB(m)))
1165			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1166		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1167			    r->dst.port[0], r->dst.port[1], th->th_dport))
1168			r = r->skip[PF_SKIP_DST_PORT].ptr;
1169		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1170			    pf_osfp_fingerprint(pd, m, off, th),
1171			    r->os_fingerprint))
1172			r = TAILQ_NEXT(r, entries);
1173		else {
1174			rm = r;
1175			break;
1176		}
1177	}
1178
1179	if (rm == NULL || rm->action == PF_NOSCRUB)
1180		return (PF_PASS);
1181	else {
1182		r->packets[dir == PF_OUT]++;
1183		r->bytes[dir == PF_OUT] += pd->tot_len;
1184	}
1185
1186	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1187		pd->flags |= PFDESC_TCP_NORM;
1188
1189	flags = th->th_flags;
1190	if (flags & TH_SYN) {
1191		/* Illegal packet */
1192		if (flags & TH_RST)
1193			goto tcp_drop;
1194
1195		if (flags & TH_FIN)
1196			goto tcp_drop;
1197	} else {
1198		/* Illegal packet */
1199		if (!(flags & (TH_ACK|TH_RST)))
1200			goto tcp_drop;
1201	}
1202
1203	if (!(flags & TH_ACK)) {
1204		/* These flags are only valid if ACK is set */
1205		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1206			goto tcp_drop;
1207	}
1208
1209	/* Check for illegal header length */
1210	if (th->th_off < (sizeof(struct tcphdr) >> 2))
1211		goto tcp_drop;
1212
1213	/* If flags changed, or reserved data set, then adjust */
1214	if (flags != th->th_flags || th->th_x2 != 0) {
1215		u_int16_t	ov, nv;
1216
1217		ov = *(u_int16_t *)(&th->th_ack + 1);
1218		th->th_flags = flags;
1219		th->th_x2 = 0;
1220		nv = *(u_int16_t *)(&th->th_ack + 1);
1221
1222		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
1223		rewrite = 1;
1224	}
1225
1226	/* Remove urgent pointer, if TH_URG is not set */
1227	if (!(flags & TH_URG) && th->th_urp) {
1228		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
1229		    0, 0);
1230		th->th_urp = 0;
1231		rewrite = 1;
1232	}
1233
1234	/* Process options */
1235	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
1236		rewrite = 1;
1237
1238	/* copy back packet headers if we sanitized */
1239	if (rewrite)
1240		m_copyback(m, off, sizeof(*th), (caddr_t)th);
1241
1242	return (PF_PASS);
1243
1244 tcp_drop:
1245	REASON_SET(&reason, PFRES_NORM);
1246	if (rm != NULL && r->log)
1247		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
1248		    1);
1249	return (PF_DROP);
1250}
1251
1252int
1253pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1254    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1255{
1256	u_int32_t tsval, tsecr;
1257	u_int8_t hdr[60];
1258	u_int8_t *opt;
1259
1260	KASSERT((src->scrub == NULL),
1261	    ("pf_normalize_tcp_init: src->scrub != NULL"));
1262
1263	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1264	if (src->scrub == NULL)
1265		return (1);
1266
1267	switch (pd->af) {
1268#ifdef INET
1269	case AF_INET: {
1270		struct ip *h = mtod(m, struct ip *);
1271		src->scrub->pfss_ttl = h->ip_ttl;
1272		break;
1273	}
1274#endif /* INET */
1275#ifdef INET6
1276	case AF_INET6: {
1277		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1278		src->scrub->pfss_ttl = h->ip6_hlim;
1279		break;
1280	}
1281#endif /* INET6 */
1282	}
1283
1284
1285	/*
1286	 * All normalizations below are only begun if we see the start of
1287	 * the connection.  They must all set an enabled bit in pfss_flags
1288	 */
1289	if ((th->th_flags & TH_SYN) == 0)
1290		return (0);
1291
1292
1293	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1294	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1295		/* Diddle with TCP options */
1296		int hlen;
1297		opt = hdr + sizeof(struct tcphdr);
1298		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1299		while (hlen >= TCPOLEN_TIMESTAMP) {
1300			switch (*opt) {
1301			case TCPOPT_EOL:	/* FALLTHROUGH */
1302			case TCPOPT_NOP:
1303				opt++;
1304				hlen--;
1305				break;
1306			case TCPOPT_TIMESTAMP:
1307				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1308					src->scrub->pfss_flags |=
1309					    PFSS_TIMESTAMP;
1310					src->scrub->pfss_ts_mod =
1311					    htonl(arc4random());
1312
1313					/* note PFSS_PAWS not set yet */
1314					memcpy(&tsval, &opt[2],
1315					    sizeof(u_int32_t));
1316					memcpy(&tsecr, &opt[6],
1317					    sizeof(u_int32_t));
1318					src->scrub->pfss_tsval0 = ntohl(tsval);
1319					src->scrub->pfss_tsval = ntohl(tsval);
1320					src->scrub->pfss_tsecr = ntohl(tsecr);
1321					getmicrouptime(&src->scrub->pfss_last);
1322				}
1323				/* FALLTHROUGH */
1324			default:
1325				hlen -= MAX(opt[1], 2);
1326				opt += MAX(opt[1], 2);
1327				break;
1328			}
1329		}
1330	}
1331
1332	return (0);
1333}
1334
1335void
1336pf_normalize_tcp_cleanup(struct pf_state *state)
1337{
1338	if (state->src.scrub)
1339		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1340	if (state->dst.scrub)
1341		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1342
1343	/* Someday... flush the TCP segment reassembly descriptors. */
1344}
1345
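/*
 * Stateful TCP scrubbing: raise each packet's TTL to the highest value
 * seen on the connection, modulate RFC 1323 timestamps, validate them
 * against the PAWS window, and track whether the peer timestamps its data
 * packets.  Returns PF_DROP with PFRES_TS on a timestamp failure, 0
 * otherwise.
 */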
1346int
1347pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1348    u_short *reason, struct tcphdr *th, struct pf_state *state,
1349    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1350{
1351	struct timeval uptime;
1352	u_int32_t tsval, tsecr;
1353	u_int tsval_from_last;
1354	u_int8_t hdr[60];
1355	u_int8_t *opt;
1356	int copyback = 0;
1357	int got_ts = 0;
1358
1359	KASSERT((src->scrub || dst->scrub),
1360	    ("%s: neither src->scrub nor dst->scrub is set", __func__));
1361
1362	/*
1363	 * Never let a packet's TTL drop below the highest value seen on the
1364	 * connection.  This defeats a common technique used to evade intrusion
1365	 * detection systems and to confuse firewall state code.
1366	 */
1367	switch (pd->af) {
1368#ifdef INET
1369	case AF_INET: {
1370		if (src->scrub) {
1371			struct ip *h = mtod(m, struct ip *);
1372			if (h->ip_ttl > src->scrub->pfss_ttl)
1373				src->scrub->pfss_ttl = h->ip_ttl;
1374			h->ip_ttl = src->scrub->pfss_ttl;
1375		}
1376		break;
1377	}
1378#endif /* INET */
1379#ifdef INET6
1380	case AF_INET6: {
1381		if (src->scrub) {
1382			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1383			if (h->ip6_hlim > src->scrub->pfss_ttl)
1384				src->scrub->pfss_ttl = h->ip6_hlim;
1385			h->ip6_hlim = src->scrub->pfss_ttl;
1386		}
1387		break;
1388	}
1389#endif /* INET6 */
1390	}
1391
1392	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1393	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1394	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1395	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1396		/* Diddle with TCP options */
1397		int hlen;
1398		opt = hdr + sizeof(struct tcphdr);
1399		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1400		while (hlen >= TCPOLEN_TIMESTAMP) {
1401			switch (*opt) {
1402			case TCPOPT_EOL:	/* FALLTHROUGH */
1403			case TCPOPT_NOP:
1404				opt++;
1405				hlen--;
1406				break;
1407			case TCPOPT_TIMESTAMP:
1408				/* Modulate the timestamps.  Can be used for
1409				 * NAT detection, OS uptime determination or
1410				 * reboot detection.
1411				 */
1412
1413				if (got_ts) {
1414					/* Huh?  Multiple timestamps!? */
1415					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1416						DPFPRINTF(("multiple TS??"));
1417						pf_print_state(state);
1418						printf("\n");
1419					}
1420					REASON_SET(reason, PFRES_TS);
1421					return (PF_DROP);
1422				}
1423				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1424					memcpy(&tsval, &opt[2],
1425					    sizeof(u_int32_t));
1426					if (tsval && src->scrub &&
1427					    (src->scrub->pfss_flags &
1428					    PFSS_TIMESTAMP)) {
1429						tsval = ntohl(tsval);
1430						pf_change_proto_a(m, &opt[2],
1431						    &th->th_sum,
1432						    htonl(tsval +
1433						    src->scrub->pfss_ts_mod),
1434						    0);
1435						copyback = 1;
1436					}
1437
1438					/* Modulate TS reply iff valid (!0) */
1439					memcpy(&tsecr, &opt[6],
1440					    sizeof(u_int32_t));
1441					if (tsecr && dst->scrub &&
1442					    (dst->scrub->pfss_flags &
1443					    PFSS_TIMESTAMP)) {
1444						tsecr = ntohl(tsecr)
1445						    - dst->scrub->pfss_ts_mod;
1446						pf_change_proto_a(m, &opt[6],
1447						    &th->th_sum, htonl(tsecr),
1448						    0);
1449						copyback = 1;
1450					}
1451					got_ts = 1;
1452				}
1453				/* FALLTHROUGH */
1454			default:
1455				hlen -= MAX(opt[1], 2);
1456				opt += MAX(opt[1], 2);
1457				break;
1458			}
1459		}
1460		if (copyback) {
1461			/* Copy back the options; the caller copies back the header */
1462			*writeback = 1;
1463			m_copyback(m, off + sizeof(struct tcphdr),
1464			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1465			    sizeof(struct tcphdr));
1466		}
1467	}
1468
1469
1470	/*
1471	 * Must invalidate PAWS checks on connections idle for too long.
1472	 * The fastest allowed timestamp clock is 1ms.  That turns out to
1473	 * be about 24 days before it wraps.  XXX Right now our lowerbound
1474	 * TS echo check only works for the first 12 days of a connection
1475	 * when the TS has exhausted half its 32bit space
1476	 */
1477#define TS_MAX_IDLE	(24*24*60*60)
1478#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
1479
1480	getmicrouptime(&uptime);
1481	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1482	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1483	    time_uptime - state->creation > TS_MAX_CONN))  {
1484		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1485			DPFPRINTF(("src idled out of PAWS\n"));
1486			pf_print_state(state);
1487			printf("\n");
1488		}
1489		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1490		    | PFSS_PAWS_IDLED;
1491	}
1492	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1493	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1494		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1495			DPFPRINTF(("dst idled out of PAWS\n"));
1496			pf_print_state(state);
1497			printf("\n");
1498		}
1499		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1500		    | PFSS_PAWS_IDLED;
1501	}
1502
1503	if (got_ts && src->scrub && dst->scrub &&
1504	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1505	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1506		/* Validate that the timestamps are "in-window".
1507		 * RFC1323 describes TCP Timestamp options that allow
1508		 * measurement of RTT (round trip time) and PAWS
1509		 * (protection against wrapped sequence numbers).  PAWS
1510		 * gives us a set of rules for rejecting packets on
1511		 * long fat pipes (packets that were somehow delayed
1512		 * in transit longer than the time it took to send the
1513		 * full TCP sequence space of 4Gb).  We can use these
1514		 * rules and infer a few others that will let us treat
1515		 * the 32bit timestamp and the 32bit echoed timestamp
1516		 * as sequence numbers to prevent a blind attacker from
1517		 * inserting packets into a connection.
1518		 *
1519		 * RFC1323 tells us:
1520		 *  - The timestamp on this packet must be greater than
1521		 *    or equal to the last value echoed by the other
1522		 *    endpoint.  The RFC says those will be discarded
1523		 *    since it is a dup that has already been acked.
1524		 *    This gives us a lowerbound on the timestamp.
1525		 *        timestamp >= other last echoed timestamp
1526		 *  - The timestamp will be less than or equal to
1527		 *    the last timestamp plus the time between the
1528		 *    last packet and now.  The RFC defines the max
1529		 *    clock rate as 1ms.  We will allow clocks to be
1530		 *    up to 10% fast and will allow a total difference
1531		 *    or 30 seconds due to a route change.  And this
1532		 *    of 30 seconds due to a route change.  And this
1533		 *        timestamp <= last timestamp + max ticks
1534		 *    We have to be careful here.  Windows will send an
1535		 *    initial timestamp of zero and then initialize it
1536		 *    to a random value after the 3whs; presumably to
1537		 *    avoid a DoS by having to call an expensive RNG
1538		 *    during a SYN flood.  Proof MS has at least one
1539		 *    good security geek.
1540		 *
1541		 *  - The TCP timestamp option must also echo the other
1542		 *    endpoint's timestamp.  The timestamp echoed is the
1543		 *    one carried on the earliest unacknowledged segment
1544		 *    on the left edge of the sequence window.  The RFC
1545		 *    states that the host will reject any echoed
1546		 *    timestamps that were larger than any ever sent.
1547		 *    This gives us an upperbound on the TS echo.
1548		 *        tsecr <= largest_tsval
1549		 *  - The lowerbound on the TS echo is a little more
1550		 *    tricky to determine.  The other endpoint's echoed
1551		 *    values will not decrease.  But there may be
1552		 *    network conditions that re-order packets and
1553		 *    cause our view of them to decrease.  For now the
1554		 *    only lowerbound we can safely determine is that
1555		 *    the TS echo will never be less than the original
1556		 *    TS.  XXX There is probably a better lowerbound.
1557		 *    Remove TS_MAX_CONN with better lowerbound check.
1558		 *        tsecr >= other original TS
1559		 *
1560		 * It is also important to note that the fastest
1561		 * timestamp clock of 1ms will wrap its 32bit space in
1562		 * 24 days.  So we just disable TS checking after 24
1563		 * days of idle time.  We actually must use a 12d
1564		 * connection limit until we can come up with a better
1565		 * lowerbound to the TS echo check.
1566		 */
1567		struct timeval delta_ts;
1568		int ts_fudge;
1569
1570
1571		/*
1572		 * PFTM_TS_DIFF is how many seconds of leeway to allow
1573		 * a host's timestamp.  This can happen if the previous
1574		 * packet got delayed in transit for much longer than
1575		 * this packet.
1576		 */
1577		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
1578			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
1579
1580		/* Calculate max ticks since the last timestamp */
1581#define TS_MAXFREQ	1100		/* RFC max TS freq of 1 kHz + 10% skew */
1582#define TS_MICROSECS	1000000		/* microseconds per second */
1583		delta_ts = uptime;
1584		timevalsub(&delta_ts, &src->scrub->pfss_last);
1585		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1586		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
1587
1588		if ((src->state >= TCPS_ESTABLISHED &&
1589		    dst->state >= TCPS_ESTABLISHED) &&
1590		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1591		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1592		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1593		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1594			/* Bad RFC1323 implementation or an insertion attack.
1595			 *
1596			 * - Solaris 2.6 and 2.7 are known to send another ACK
1597			 *   after the FIN,FIN|ACK,ACK closing that carries
1598			 *   an old timestamp.
1599			 */
1600
1601			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1602			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1603			    SEQ_GT(tsval, src->scrub->pfss_tsval +
1604			    tsval_from_last) ? '1' : ' ',
1605			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1606			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1607			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
1608			    "idle: %jus %lums\n",
1609			    tsval, tsecr, tsval_from_last,
1610			    (uintmax_t)delta_ts.tv_sec,
1611			    delta_ts.tv_usec / 1000));
1612			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
1613			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1614			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
1615			    "\n", dst->scrub->pfss_tsval,
1616			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1617			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1618				pf_print_state(state);
1619				pf_print_flags(th->th_flags);
1620				printf("\n");
1621			}
1622			REASON_SET(reason, PFRES_TS);
1623			return (PF_DROP);
1624		}
1625
1626		/* XXX I'd really like to require tsecr but it's optional */
1627
1628	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
1629	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1630	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
1631	    src->scrub && dst->scrub &&
1632	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1633	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1634		/* Didn't send a timestamp.  Timestamps aren't really useful
1635		 * when:
1636		 *  - connection opening or closing (often not even sent),
1637		 *    but we must not let an attacker put a FIN on a
1638		 *    data packet to sneak it through our ESTABLISHED check.
1639		 *  - on a TCP reset.  RFC suggests not even looking at TS.
1640		 *  - on an empty ACK.  The TS will not be echoed so it will
1641		 *    probably not help keep the RTT calculation in sync and
1642		 *    there isn't as much danger when the sequence numbers
1643		 *    got wrapped.  So some stacks don't include TS on empty
1644		 *    ACKs :-(
1645		 *
1646		 * To minimize the disruption to mostly RFC1323 conformant
1647		 * stacks, we will only require timestamps on data packets.
1648		 *
1649		 * And what do ya know, we cannot require timestamps on data
1650		 * packets.  There appear to be devices that do legitimate
1651		 * TCP connection hijacking.  There are HTTP devices that allow
1652		 * a 3whs (with timestamps) and then buffer the HTTP request.
1653		 * If the intermediate device has the HTTP response cache, it
1654		 * will spoof the response but not bother timestamping its
1655		 * packets.  So we can look for the presence of a timestamp in
1656		 * the first data packet and if there, require it in all future
1657		 * packets.
1658		 */
1659
1660		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1661			/*
1662			 * Hey!  Someone tried to sneak a packet in.  Or the
1663			 * stack changed its RFC1323 behavior?!?!
1664			 */
1665			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1666				DPFPRINTF(("Did not receive expected RFC1323 "
1667				    "timestamp\n"));
1668				pf_print_state(state);
1669				pf_print_flags(th->th_flags);
1670				printf("\n");
1671			}
1672			REASON_SET(reason, PFRES_TS);
1673			return (PF_DROP);
1674		}
1675	}
1676
1677
1678	/*
1679	 * We note whether a host sends its data packets with or without
1680	 * timestamps, and require all later data packets to contain a timestamp
1681	 * if the first one does.  PAWS implicitly requires that all data packets be
1682	 * timestamped.  But I think there are middle-man devices that hijack
1683	 * TCP streams immediately after the 3whs and don't timestamp their
1684	 * packets (seen in a WWW accelerator or cache).
1685	 */
1686	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1687	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1688		if (got_ts)
1689			src->scrub->pfss_flags |= PFSS_DATA_TS;
1690		else {
1691			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1692			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1693			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1694				/* Don't warn if other host rejected RFC1323 */
1695				DPFPRINTF(("Broken RFC1323 stack did not "
1696				    "timestamp data packet. Disabled PAWS "
1697				    "security.\n"));
1698				pf_print_state(state);
1699				pf_print_flags(th->th_flags);
1700				printf("\n");
1701			}
1702		}
1703	}
1704
1705
1706	/*
1707	 * Update PAWS values
1708	 */
1709	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1710	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1711		getmicrouptime(&src->scrub->pfss_last);
1712		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1713		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1714			src->scrub->pfss_tsval = tsval;
1715
1716		if (tsecr) {
1717			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1718			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1719				src->scrub->pfss_tsecr = tsecr;
1720
1721			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1722			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1723			    src->scrub->pfss_tsval0 == 0)) {
1724				/* tsval0 MUST be the lowest timestamp */
1725				src->scrub->pfss_tsval0 = tsval;
1726			}
1727
1728			/* Only fully initialized after a TS gets echoed */
1729			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1730				src->scrub->pfss_flags |= PFSS_PAWS;
1731		}
1732	}
1733
1734	/* I have a dream....  TCP segment reassembly.... */
1735	return (0);
1736}
1737
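/*
 * Walk the TCP options and clamp any MSS option that exceeds the rule's
 * max-mss, fixing up the checksum.  Returns nonzero when the options were
 * rewritten, in which case they have already been copied back into the
 * mbuf.
 */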
1738static int
1739pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
1740    int off, sa_family_t af)
1741{
1742	u_int16_t	*mss;
1743	int		 thoff;
1744	int		 opt, cnt, optlen = 0;
1745	int		 rewrite = 0;
1746	u_char		 opts[TCP_MAXOLEN];
1747	u_char		*optp = opts;
1748
1749	thoff = th->th_off << 2;
1750	cnt = thoff - sizeof(struct tcphdr);
1751
1752	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
1753	    NULL, NULL, af))
1754		return (rewrite);
1755
1756	for (; cnt > 0; cnt -= optlen, optp += optlen) {
1757		opt = optp[0];
1758		if (opt == TCPOPT_EOL)
1759			break;
1760		if (opt == TCPOPT_NOP)
1761			optlen = 1;
1762		else {
1763			if (cnt < 2)
1764				break;
1765			optlen = optp[1];
1766			if (optlen < 2 || optlen > cnt)
1767				break;
1768		}
1769		switch (opt) {
1770		case TCPOPT_MAXSEG:
1771			mss = (u_int16_t *)(optp + 2);
1772			if ((ntohs(*mss)) > r->max_mss) {
1773				th->th_sum = pf_proto_cksum_fixup(m,
1774				    th->th_sum, *mss, htons(r->max_mss), 0);
1775				*mss = htons(r->max_mss);
1776				rewrite = 1;
1777			}
1778			break;
1779		default:
1780			break;
1781		}
1782	}
1783
1784	if (rewrite)
1785		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);
1786
1787	return (rewrite);
1788}
1789
1790#ifdef INET
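/*
 * Apply the per-rule IPv4 header scrub actions: clear DF for no-df, raise
 * the TTL to min-ttl, overwrite the TOS for set-tos and assign a fresh IP
 * ID for random-id (fragments excluded), adjusting the header checksum for
 * every change.
 */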
1791static void
1792pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
1793{
1794	struct mbuf		*m = *m0;
1795	struct ip		*h = mtod(m, struct ip *);
1796
1797	/* Clear IP_DF if no-df was requested */
1798	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
1799		u_int16_t ip_off = h->ip_off;
1800
1801		h->ip_off &= htons(~IP_DF);
1802		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1803	}
1804
1805	/* Enforce a minimum ttl, may cause endless packet loops */
1806	if (min_ttl && h->ip_ttl < min_ttl) {
1807		u_int16_t ip_ttl = h->ip_ttl;
1808
1809		h->ip_ttl = min_ttl;
1810		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
1811	}
1812
1813	/* Enforce tos */
1814	if (flags & PFRULE_SET_TOS) {
1815		u_int16_t	ov, nv;
1816
1817		ov = *(u_int16_t *)h;
1818		h->ip_tos = tos;
1819		nv = *(u_int16_t *)h;
1820
1821		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
1822	}
1823
1824	/* random-id, but not for fragments */
1825	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
1826		uint16_t ip_id = h->ip_id;
1827
1828		ip_fillid(h);
1829		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
1830	}
1831}
1832#endif /* INET */
1833
1834#ifdef INET6
1835static void
1836pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
1837{
1838	struct mbuf		*m = *m0;
1839	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
1840
1841	/* Enforce a minimum ttl, may cause endless packet loops */
1842	if (min_ttl && h->ip6_hlim < min_ttl)
1843		h->ip6_hlim = min_ttl;
1844}
1845#endif
1846