pf_norm.c (126259) → pf_norm.c (126261)
/* $FreeBSD: head/sys/contrib/pf/net/pf_norm.c 126261 2004-02-26 02:34:12Z mlaier $ */
/* $OpenBSD: pf_norm.c,v 1.75 2003/08/29 01:49:08 dhartmei Exp $ */

/*
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(__FreeBSD__)
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_random_ip_id.h" /* or ip_var does not export it */
#include "opt_pf.h"
#define NPFLOG DEV_PFLOG
#else
#include "pflog.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>
#if !defined(__FreeBSD__)
#include <sys/pool.h>
#endif

#if !defined(__FreeBSD__)
#include <dev/rndvar.h>
#endif
#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pfvar.h>

#if defined(__FreeBSD__) && defined(INET6)
/*
 * XXX: This should go to netinet/ip6.h (KAME)
 */
/* IPv6 options: common part */
struct ip6_opt {
	u_int8_t	ip6o_type;
	u_int8_t	ip6o_len;
} __packed;

/* Jumbo Payload Option */
struct ip6_opt_jumbo {
	u_int8_t	ip6oj_type;
	u_int8_t	ip6oj_len;
	u_int8_t	ip6oj_jumbo_len[4];
} __packed;

/* NSAP Address Option */
struct ip6_opt_nsap {
	u_int8_t	ip6on_type;
	u_int8_t	ip6on_len;
	u_int8_t	ip6on_src_nsap_len;
	u_int8_t	ip6on_dst_nsap_len;
	/* followed by source NSAP */
	/* followed by destination NSAP */
} __packed;

/* Tunnel Limit Option */
struct ip6_opt_tunnel {
	u_int8_t	ip6ot_type;
	u_int8_t	ip6ot_len;
	u_int8_t	ip6ot_encap_limit;
} __packed;

/* Router Alert Option */
struct ip6_opt_router {
	u_int8_t	ip6or_type;
	u_int8_t	ip6or_len;
	u_int8_t	ip6or_value[2];
} __packed;
#endif /* __FreeBSD__ && INET6 */

#if !defined(__FreeBSD__)
struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;
	struct ip *fr_ip;
	struct mbuf *fr_m;
};

struct pf_frcache {
	LIST_ENTRY(pf_frcache) fr_next;
	uint16_t	fr_off;
	uint16_t	fr_end;
};
#endif

#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))

#if !defined(__FreeBSD__)
struct pf_fragment {
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
	u_int16_t	fr_id;		/* fragment id for reassemble */
	u_int16_t	fr_max;		/* fragment data max */
	u_int32_t	fr_timeout;
#define fr_queue	fr_u.fru_queue
#define fr_cache	fr_u.fru_cache
	union {
		LIST_HEAD(pf_fragq, pf_frent) fru_queue;	/* buffering */
		LIST_HEAD(pf_cacheq, pf_frcache) fru_cache;	/* non-buf */
	} fr_u;
};
#endif
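
/*
 * Reassembly state comes in two flavours: fully buffered fragments
 * (pf_frag_tree/pf_fragqueue, where the entries hold the mbufs until
 * the datagram is complete) and the non-buffering cache
 * (pf_cache_tree/pf_cachequeue, which only records the ranges already
 * passed on).  BUFFER_FRAGMENTS() tells the two apart.
 */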

TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;

static __inline int	 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
#ifndef RANDOM_IP_ID
extern u_int16_t ip_randomid(void);
#endif
void			 pf_ip2key(struct pf_fragment *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment **,
			    struct pf_frent *, int);
struct mbuf		*pf_fragcache(struct mbuf **, struct ip*,
			    struct pf_fragment **, int, int, int *);
u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int			 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int);

#define DPFPRINTF(x)	if (pf_status.debug >= PF_DEBUG_MISC) \
			    { printf("%s: ", __func__); printf x ;}

/* Globals */
#if defined(__FreeBSD__)
uma_zone_t		 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
uma_zone_t		 pf_state_scrub_pl;
#else
struct pool		 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
struct pool		 pf_state_scrub_pl;
#endif
int			 pf_nfrents, pf_ncache;

void
pf_normalize_init(void)
{
#if defined(__FreeBSD__)
	/*
	 * XXX
	 * No high water mark support (it's a hint, not a hard limit).
	 * uma_zone_set_max(pf_frag_pl, PFFRAG_FRAG_HIWAT);
	 */
	uma_zone_set_max(pf_frent_pl, PFFRAG_FRENT_HIWAT);
	uma_zone_set_max(pf_cache_pl, PFFRAG_FRCACHE_HIWAT);
	uma_zone_set_max(pf_cent_pl, PFFRAG_FRCENT_HIWAT);
#else
	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
	    NULL);
	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
	    NULL);
	pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
	    "pffrcache", NULL);
	pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
	    NULL);
	pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
	    "pfstscr", NULL);

	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);
#endif

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}
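
/*
 * Tree order for both RB-trees: compare by fragment id first, then
 * protocol, then source and destination address, mirroring the key
 * filled in by pf_ip2key().
 */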
static __inline int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}
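
/*
 * Scan both LRU queues from the tail (the least recently used entry)
 * and free every reassembly that has been idle longer than the
 * PFTM_FRAG timeout.
 */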
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
#if defined(__FreeBSD__)
	u_int32_t		 expire = time_second -
				    pf_default_rule.timeout[PFTM_FRAG];
#else
	u_int32_t		 expire = time.tv_sec -
				    pf_default_rule.timeout[PFTM_FRAG];
#endif

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
#if defined(__FreeBSD__)
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
#else
		KASSERT(BUFFER_FRAGMENTS(frag));
#endif
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
#if defined(__FreeBSD__)
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
#else
		KASSERT(!BUFFER_FRAGMENTS(frag));
#endif
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
#if defined(__FreeBSD__)
		KASSERT((TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
#else
		KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
#endif
	}
}

/*
 * Try to flush old fragments to make space for new ones
 */

void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

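	/*
	 * Shrink each pool to roughly 90% of its current usage; entries
	 * are taken from the tail of the LRU queue, so the oldest
	 * reassemblies go first.
	 */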
	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
	    pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
	    pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}

/* Frees the fragments and all associated entries */

void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

#if defined(__FreeBSD__)
			KASSERT((LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frcache->fr_end): %s", __FUNCTION__));
#else
			KASSERT(LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end);
#endif

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}
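
/*
 * Fill the RB-tree search key from an IP header; fragments of one
 * datagram share the (id, protocol, src, dst) tuple.
 */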
void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
#if defined(__FreeBSD__)
		frag->fr_timeout = time_second;
#else
		frag->fr_timeout = time.tv_sec;
#endif
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */

void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}

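/*
 * Byte offset of a queued fragment: ip_off counts 8-byte units, hence
 * the shift by 3.
 */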
#define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

#if defined(__FreeBSD__)
	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
#else
	KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));
#endif

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
#if defined(__FreeBSD__)
		(*frag)->fr_timeout = time_second;
#else
		(*frag)->fr_timeout = time.tv_sec;
#endif
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

#if defined(__FreeBSD__)
	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));
#else
	KASSERT(frep != NULL || frea != NULL);
#endif
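
	/*
	 * Resolve overlaps: trim the head of the new fragment where the
	 * preceding entry already covers it (precut), then trim or drop
	 * the following entries that the new fragment covers (aftercut).
	 */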
	if (frep != NULL &&
	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = htons(ip_len);
	}

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
			    (aftercut >> 3));
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}

 insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
#if defined(__FreeBSD__)
	KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
#else
	KASSERT(frent != NULL);
#endif
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m_cat(m, m2);
	}

	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = htons(off + hlen);
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}

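/*
 * Non-buffering counterpart of pf_reassemble(): fragments are passed
 * on (or dropped) immediately and only the byte ranges already seen
 * are cached, so duplicates and overlaps can be detected without
 * holding on to the mbufs.
 */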
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t		 off = ntohs(h->ip_off) << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

#if defined(__FreeBSD__)
	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
#else
	KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
#endif

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, *frag);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
#if defined(__FreeBSD__)
		(*frag)->fr_timeout = time_second;
#else
		(*frag)->fr_timeout = time.tv_sec;
#endif

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

#if defined(__FreeBSD__)
	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));
#else
	KASSERT(frp != NULL || fra != NULL);
#endif
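
	/*
	 * As in pf_reassemble(), precut is overlap with the cached range
	 * in front of us and aftercut overlap with the range behind; here
	 * the overlapping data is cut out of the live packet before it is
	 * passed on.
	 */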
	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
#if defined(__FreeBSD__)
				*m0 = m_dup(m, M_DONTWAIT);
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);
#else
				*m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
#endif
				if (*m0 == NULL)
					goto no_mem;
#if defined(__FreeBSD__)
				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
#else
				KASSERT((*m0)->m_next == NULL);
#endif
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

#if defined(__FreeBSD__)
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - precut),
				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
				    __FUNCTION__));
#else
				KASSERT((int)m->m_len == ntohs(h->ip_len) - precut);
#endif
				h->ip_off = htons(ntohs(h->ip_off) + (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
#if defined(__FreeBSD__)
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
				    __FUNCTION__));
#else
				KASSERT((int)m->m_len == ntohs(h->ip_len) - aftercut);
#endif
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				pf_ncache--;
				cur = NULL;

			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
#if defined(__FreeBSD__)
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
#else
				KASSERT(cur == NULL);
#endif
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				pf_ncache--;
				frp = NULL;

			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
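
/*
 * Normalize IPv4 packets against the scrub ruleset: match the packet
 * to a scrub rule, sanity check the header, and route fragments either
 * through full reassembly or through the fragment cache, depending on
 * the rule's fragment options.
 */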
int
pf_normalize_ip(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;

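	/*
	 * Scan the scrub rules; the skip pointers jump over runs of
	 * rules that are known to fail the same criterion, avoiding a
	 * full linear search.
	 */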
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
	else
		r->packets++;

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off &= htons(~IP_DF);

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT) {
			if (m_tag_find(m, PACKET_TAG_PF_FRAGCACHE, NULL) !=
			    NULL) {
				/* Already passed the fragment cache in the
				 * input direction.  If we continued, it would
				 * appear to be a dup and would be dropped.
				 */
				goto fragment_pass;
			}
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN) {
			struct m_tag	*mtag;

			mtag = m_tag_get(PACKET_TAG_PF_FRAGCACHE, 0, M_NOWAIT);
			if (mtag == NULL)
				goto no_mem;
			m_tag_prepend(m, mtag);
		}
		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	h->ip_off &= htons(IP_DF);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	if (r->rule_flag & PFRULE_RANDOMID)
		h->ip_id = ip_randomid();

	return (PF_PASS);

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);

	return (PF_DROP);
}
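
/*
 * IPv6 normalization walks the extension header chain (hop-by-hop,
 * routing, destination options, AH), validating lengths and the jumbo
 * payload option, until it reaches a fragment header or an upper-layer
 * protocol.
 */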
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
	else
		r->packets++;

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

1136 /* jumbo payload option must be present, or plen > 0 */
1137 if (ntohs(h->ip6_plen) == 0)
1138 plen = jumbolen;
1139 else
1140 plen = ntohs(h->ip6_plen);
1141 if (plen == 0)
1142 goto drop;
1143 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1144 goto shortpkt;
1145
1146 /* Enforce a minimum ttl, may cause endless packet loops */
1147 if (r->min_ttl && h->ip6_hlim < r->min_ttl)
1148 h->ip6_hlim = r->min_ttl;
1149
1150 return (PF_PASS);
1151
1152 fragment:
1153 if (ntohs(h->ip6_plen) == 0 || jumbolen)
1154 goto drop;
1155 plen = ntohs(h->ip6_plen);
1156
1157 if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1158 goto shortpkt;
1159 fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
1160 if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
1161 goto badfrag;
1162
1163 /* do something about it */
1164 return (PF_PASS);
1165
1166 shortpkt:
1167 REASON_SET(reason, PFRES_SHORT);
1168 if (r != NULL && r->log)
1169 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1170 return (PF_DROP);
1171
1172 drop:
1173 REASON_SET(reason, PFRES_NORM);
1174 if (r != NULL && r->log)
1175 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1176 return (PF_DROP);
1177
1178 badfrag:
1179 REASON_SET(reason, PFRES_FRAG);
1180 if (r != NULL && r->log)
1181 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1182 return (PF_DROP);
1183}
1184#endif
1185
1186int
1187pf_normalize_tcp(int dir, struct ifnet *ifp, struct mbuf *m, int ipoff,
1188 int off, void *h, struct pf_pdesc *pd)
1189{
1190 struct pf_rule *r, *rm = NULL;
1191 struct tcphdr *th = pd->hdr.tcp;
1192 int rewrite = 0;
1193 u_short reason;
1194 u_int8_t flags;
1195 sa_family_t af = pd->af;
1196
1197 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1198 while (r != NULL) {
1199 r->evaluations++;
1200 if (r->ifp != NULL && r->ifp != ifp)
1201 r = r->skip[PF_SKIP_IFP].ptr;
1202 else if (r->direction && r->direction != dir)
1203 r = r->skip[PF_SKIP_DIR].ptr;
1204 else if (r->af && r->af != af)
1205 r = r->skip[PF_SKIP_AF].ptr;
1206 else if (r->proto && r->proto != pd->proto)
1207 r = r->skip[PF_SKIP_PROTO].ptr;
1208 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
1209 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1210 else if (r->src.port_op && !pf_match_port(r->src.port_op,
1211 r->src.port[0], r->src.port[1], th->th_sport))
1212 r = r->skip[PF_SKIP_SRC_PORT].ptr;
1213 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
1214 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1215 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1216 r->dst.port[0], r->dst.port[1], th->th_dport))
1217 r = r->skip[PF_SKIP_DST_PORT].ptr;
1218 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1219 pf_osfp_fingerprint(pd, m, off, th),
1220 r->os_fingerprint))
1221 r = TAILQ_NEXT(r, entries);
1222 else {
1223 rm = r;
1224 break;
1225 }
1226 }
1227
1228 if (rm == NULL)
1229 return (PF_PASS);
1230 else
1231 r->packets++;
1232
1233 if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1234 pd->flags |= PFDESC_TCP_NORM;
1235
1236 flags = th->th_flags;
1237 if (flags & TH_SYN) {
1238 /* Illegal packet */
1239 if (flags & TH_RST)
1240 goto tcp_drop;
1241
1242 if (flags & TH_FIN)
1243 flags &= ~TH_FIN;
1244 } else {
1245 /* Illegal packet */
1246 if (!(flags & (TH_ACK|TH_RST)))
1247 goto tcp_drop;
1248 }
1249
1250 if (!(flags & TH_ACK)) {
1251 /* These flags are only valid if ACK is set */
1252 if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1253 goto tcp_drop;
1254 }
1255
1256 /* Check for illegal header length */
1257 if (th->th_off < (sizeof(struct tcphdr) >> 2))
1258 goto tcp_drop;
1259
1260 /* If flags changed, or reserved data set, then adjust */
1261 if (flags != th->th_flags || th->th_x2 != 0) {
1262 u_int16_t ov, nv;
1263
1264 ov = *(u_int16_t *)(&th->th_ack + 1);
1265 th->th_flags = flags;
1266 th->th_x2 = 0;
1267 nv = *(u_int16_t *)(&th->th_ack + 1);
1268
1269 th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
1270 rewrite = 1;
1271 }
1272
1273 /* Remove urgent pointer, if TH_URG is not set */
1274 if (!(flags & TH_URG) && th->th_urp) {
1275 th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
1276 th->th_urp = 0;
1277 rewrite = 1;
1278 }
1279
1280 /* Process options */
1281 if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
1282 rewrite = 1;
1283
1284 /* copy back packet headers if we sanitized */
1285 if (rewrite)
908 DPFPRINTF(("fragcache[%d]: adjacent(merge "
909 "%d-%d) %d-%d (%d-%d)\n",
910 h->ip_id, frp->fr_off, frp->fr_end, off,
911 max, fra->fr_off, fra->fr_end));
912 fra->fr_off = frp->fr_off;
913 LIST_REMOVE(frp, fr_next);
914 pool_put(&pf_cent_pl, frp);
915 pf_ncache--;
916 frp = NULL;
917
918 }
919 }
920 }
921
922 if (hosed) {
923 /*
924 * We must keep tracking the overall fragment even when
925 * we're going to drop it anyway so that we know when to
926 * free the overall descriptor. Thus we drop the frag late.
927 */
928 goto drop_fragment;
929 }
930
931
932 pass:
933 /* Update maximum data size */
934 if ((*frag)->fr_max < max)
935 (*frag)->fr_max = max;
936
937 /* This is the last segment */
938 if (!mff)
939 (*frag)->fr_flags |= PFFRAG_SEENLAST;
940
941 /* Check if we are completely reassembled */
942 if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
943 LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
944 LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
945 /* Remove from fragment queue */
946 DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
947 (*frag)->fr_max));
948 pf_free_fragment(*frag);
949 *frag = NULL;
950 }
951
952 return (m);
953
954 no_mem:
955 *nomem = 1;
956
957 /* Still need to pay attention to !IP_MF */
958 if (!mff && *frag != NULL)
959 (*frag)->fr_flags |= PFFRAG_SEENLAST;
960
961 m_freem(m);
962 return (NULL);
963
964 drop_fragment:
965
966 /* Still need to pay attention to !IP_MF */
967 if (!mff && *frag != NULL)
968 (*frag)->fr_flags |= PFFRAG_SEENLAST;
969
970 if (drop) {
971		/* This fragment has been deemed bad. Don't reassemble it. */
972 if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
973 DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
974 h->ip_id));
975 (*frag)->fr_flags |= PFFRAG_DROP;
976 }
977
978 m_freem(m);
979 return (NULL);
980}
981
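/*
 * Normalize an IPv4 packet against the first matching scrub rule:
 * sanity-check the header, optionally clear IP_DF, enforce min-ttl,
 * randomize the IP ID, and run fragments through either the buffering
 * reassembly queue or the non-buffering fragment cache.
 */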
982int
983pf_normalize_ip(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
984{
985 struct mbuf *m = *m0;
986 struct pf_rule *r;
987 struct pf_frent *frent;
988 struct pf_fragment *frag = NULL;
989 struct ip *h = mtod(m, struct ip *);
990 int mff = (ntohs(h->ip_off) & IP_MF);
991 int hlen = h->ip_hl << 2;
992 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
993 u_int16_t max;
994 int ip_len;
995 int ip_off;
996
997 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
998 while (r != NULL) {
999 r->evaluations++;
1000 if (r->ifp != NULL && r->ifp != ifp)
1001 r = r->skip[PF_SKIP_IFP].ptr;
1002 else if (r->direction && r->direction != dir)
1003 r = r->skip[PF_SKIP_DIR].ptr;
1004 else if (r->af && r->af != AF_INET)
1005 r = r->skip[PF_SKIP_AF].ptr;
1006 else if (r->proto && r->proto != h->ip_p)
1007 r = r->skip[PF_SKIP_PROTO].ptr;
1008 else if (PF_MISMATCHAW(&r->src.addr,
1009 (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
1010 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1011 else if (PF_MISMATCHAW(&r->dst.addr,
1012 (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
1013 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1014 else
1015 break;
1016 }
1017
1018 if (r == NULL)
1019 return (PF_PASS);
1020 else
1021 r->packets++;
1022
1023 /* Check for illegal packets */
1024 if (hlen < (int)sizeof(struct ip))
1025 goto drop;
1026
1027 if (hlen > ntohs(h->ip_len))
1028 goto drop;
1029
1030 /* Clear IP_DF if the rule uses the no-df option */
1031 if (r->rule_flag & PFRULE_NODF)
1032 h->ip_off &= htons(~IP_DF);
1033
1034 /* We will need other tests here */
1035 if (!fragoff && !mff)
1036 goto no_fragment;
1037
1038 /* We're dealing with a fragment now. Don't allow fragments
1039 * with IP_DF to enter the cache. If the flag was cleared by
1040 * no-df above, fine. Otherwise drop it.
1041 */
1042 if (h->ip_off & htons(IP_DF)) {
1043 DPFPRINTF(("IP_DF\n"));
1044 goto bad;
1045 }
1046
1047 ip_len = ntohs(h->ip_len) - hlen;
1048 ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1049
1050 /* All fragments are 8 byte aligned */
1051 if (mff && (ip_len & 0x7)) {
1052 DPFPRINTF(("mff and %d\n", ip_len));
1053 goto bad;
1054 }
1055
1056 /* Respect maximum length */
1057 if (fragoff + ip_len > IP_MAXPACKET) {
1058 DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1059 goto bad;
1060 }
1061 max = fragoff + ip_len;
1062
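	/*
	 * Two strategies follow: with neither fragment crop nor fragment
	 * drop-ovl set, fragments are buffered until the datagram can be
	 * reassembled in full; otherwise the non-buffering cache passes
	 * fragments through while trimming or dropping data that overlaps
	 * ranges already seen.
	 */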
1063 if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
1064 /* Fully buffer all of the fragments */
1065
1066 frag = pf_find_fragment(h, &pf_frag_tree);
1067
1068 /* Check if we saw the last fragment already */
1069 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1070 max > frag->fr_max)
1071 goto bad;
1072
1073 /* Get an entry for the fragment queue */
1074 frent = pool_get(&pf_frent_pl, PR_NOWAIT);
1075 if (frent == NULL) {
1076 REASON_SET(reason, PFRES_MEMORY);
1077 return (PF_DROP);
1078 }
1079 pf_nfrents++;
1080 frent->fr_ip = h;
1081 frent->fr_m = m;
1082
1083 /* Might return a completely reassembled mbuf, or NULL */
1084 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1085 *m0 = m = pf_reassemble(m0, &frag, frent, mff);
1086
1087 if (m == NULL)
1088 return (PF_DROP);
1089
1090 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1091 goto drop;
1092
1093 h = mtod(m, struct ip *);
1094 } else {
1095 /* non-buffering fragment cache (drops or masks overlaps) */
1096 int nomem = 0;
1097
1098 if (dir == PF_OUT) {
1099 if (m_tag_find(m, PACKET_TAG_PF_FRAGCACHE, NULL) !=
1100 NULL) {
1101 /* Already passed the fragment cache in the
1102 * input direction. If we continued, it would
1103 * appear to be a dup and would be dropped.
1104 */
1105 goto fragment_pass;
1106 }
1107 }
1108
1109 frag = pf_find_fragment(h, &pf_cache_tree);
1110
1111 /* Check if we saw the last fragment already */
1112 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1113 max > frag->fr_max) {
1114 if (r->rule_flag & PFRULE_FRAGDROP)
1115 frag->fr_flags |= PFFRAG_DROP;
1116 goto bad;
1117 }
1118
1119 *m0 = m = pf_fragcache(m0, h, &frag, mff,
1120 (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
1121 if (m == NULL) {
1122 if (nomem)
1123 goto no_mem;
1124 goto drop;
1125 }
1126
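		/* Tag the packet so that, on a later outbound pass, the
		 * PF_OUT check above skips the cache instead of treating
		 * it as a duplicate */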
1127 if (dir == PF_IN) {
1128 struct m_tag *mtag;
1129
1130 mtag = m_tag_get(PACKET_TAG_PF_FRAGCACHE, 0, M_NOWAIT);
1131 if (mtag == NULL)
1132 goto no_mem;
1133 m_tag_prepend(m, mtag);
1134 }
1135 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1136 goto drop;
1137 goto fragment_pass;
1138 }
1139
1140 no_fragment:
1141 /* At this point, only IP_DF is allowed in ip_off */
1142 h->ip_off &= htons(IP_DF);
1143
1144 /* Enforce a minimum ttl, may cause endless packet loops */
1145 if (r->min_ttl && h->ip_ttl < r->min_ttl)
1146 h->ip_ttl = r->min_ttl;
1147
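	/* scrub random-id: hide the host's predictable IP ID sequence */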
1148 if (r->rule_flag & PFRULE_RANDOMID)
1149 h->ip_id = ip_randomid();
1150
1151 return (PF_PASS);
1152
1153 fragment_pass:
1154 /* Enforce a minimum ttl, may cause endless packet loops */
1155 if (r->min_ttl && h->ip_ttl < r->min_ttl)
1156 h->ip_ttl = r->min_ttl;
1157
1158 return (PF_PASS);
1159
1160 no_mem:
1161 REASON_SET(reason, PFRES_MEMORY);
1162 if (r != NULL && r->log)
1163 PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1164 return (PF_DROP);
1165
1166 drop:
1167 REASON_SET(reason, PFRES_NORM);
1168 if (r != NULL && r->log)
1169 PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1170 return (PF_DROP);
1171
1172 bad:
1173 DPFPRINTF(("dropping bad fragment\n"));
1174
1175 /* Free associated fragments */
1176 if (frag != NULL)
1177 pf_free_fragment(frag);
1178
1179 REASON_SET(reason, PFRES_FRAG);
1180 if (r != NULL && r->log)
1181 PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1182
1183 return (PF_DROP);
1184}
1185
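/*
 * Normalize an IPv6 packet: walk the extension header chain validating
 * lengths and any jumbo payload option, check the payload length against
 * the mbuf, and enforce min-ttl on the hop limit. Fragments are only
 * sanity-checked here, not reassembled.
 */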
1186#ifdef INET6
1187int
1188pf_normalize_ip6(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
1189{
1190 struct mbuf *m = *m0;
1191 struct pf_rule *r;
1192 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1193 int off;
1194 struct ip6_ext ext;
1195 struct ip6_opt opt;
1196 struct ip6_opt_jumbo jumbo;
1197 struct ip6_frag frag;
1198 u_int32_t jumbolen = 0, plen;
1199 u_int16_t fragoff = 0;
1200 int optend;
1201 int ooff;
1202 u_int8_t proto;
1203 int terminal;
1204
1205 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1206 while (r != NULL) {
1207 r->evaluations++;
1208 if (r->ifp != NULL && r->ifp != ifp)
1209 r = r->skip[PF_SKIP_IFP].ptr;
1210 else if (r->direction && r->direction != dir)
1211 r = r->skip[PF_SKIP_DIR].ptr;
1212 else if (r->af && r->af != AF_INET6)
1213 r = r->skip[PF_SKIP_AF].ptr;
1214#if 0 /* header chain! */
1215 else if (r->proto && r->proto != h->ip6_nxt)
1216 r = r->skip[PF_SKIP_PROTO].ptr;
1217#endif
1218 else if (PF_MISMATCHAW(&r->src.addr,
1219 (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
1220 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1221 else if (PF_MISMATCHAW(&r->dst.addr,
1222 (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
1223 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1224 else
1225 break;
1226 }
1227
1228 if (r == NULL)
1229 return (PF_PASS);
1230 else
1231 r->packets++;
1232
1233 /* Check for illegal packets */
1234 if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1235 goto drop;
1236
1237 off = sizeof(struct ip6_hdr);
1238 proto = h->ip6_nxt;
1239 terminal = 0;
1240 do {
1241 switch (proto) {
1242 case IPPROTO_FRAGMENT:
1243 goto fragment;
1244 break;
1245 case IPPROTO_AH:
1246 case IPPROTO_ROUTING:
1247 case IPPROTO_DSTOPTS:
1248 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1249 NULL, AF_INET6))
1250 goto shortpkt;
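			/*
			 * ip6e_len counts 32-bit words minus two for AH
			 * (RFC 2402) but 8-octet units minus one for the
			 * other extension headers (RFC 2460).
			 */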
1251 if (proto == IPPROTO_AH)
1252 off += (ext.ip6e_len + 2) * 4;
1253 else
1254 off += (ext.ip6e_len + 1) * 8;
1255 proto = ext.ip6e_nxt;
1256 break;
1257 case IPPROTO_HOPOPTS:
1258 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1259 NULL, AF_INET6))
1260 goto shortpkt;
1261 optend = off + (ext.ip6e_len + 1) * 8;
1262 ooff = off + sizeof(ext);
1263 do {
1264 if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1265 sizeof(opt.ip6o_type), NULL, NULL,
1266 AF_INET6))
1267 goto shortpkt;
1268 if (opt.ip6o_type == IP6OPT_PAD1) {
1269 ooff++;
1270 continue;
1271 }
1272 if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1273 NULL, NULL, AF_INET6))
1274 goto shortpkt;
1275 if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1276 goto drop;
1277 switch (opt.ip6o_type) {
1278 case IP6OPT_JUMBO:
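				/*
				 * RFC 2675 jumbogram: only legal when
				 * ip6_plen is zero, and the 32-bit jumbo
				 * length must exceed IPV6_MAXPACKET and
				 * match the actual packet size.
				 */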
1279 if (h->ip6_plen != 0)
1280 goto drop;
1281 if (!pf_pull_hdr(m, ooff, &jumbo,
1282 sizeof(jumbo), NULL, NULL,
1283 AF_INET6))
1284 goto shortpkt;
1285 memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
1286 sizeof(jumbolen));
1287 jumbolen = ntohl(jumbolen);
1288 if (jumbolen <= IPV6_MAXPACKET)
1289 goto drop;
1290 if (sizeof(struct ip6_hdr) + jumbolen !=
1291 m->m_pkthdr.len)
1292 goto drop;
1293 break;
1294 default:
1295 break;
1296 }
1297 ooff += sizeof(opt) + opt.ip6o_len;
1298 } while (ooff < optend);
1299
1300 off = optend;
1301 proto = ext.ip6e_nxt;
1302 break;
1303 default:
1304 terminal = 1;
1305 break;
1306 }
1307 } while (!terminal);
1308
1309 /* jumbo payload option must be present, or plen > 0 */
1310 if (ntohs(h->ip6_plen) == 0)
1311 plen = jumbolen;
1312 else
1313 plen = ntohs(h->ip6_plen);
1314 if (plen == 0)
1315 goto drop;
1316 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1317 goto shortpkt;
1318
1319 /* Enforce a minimum ttl, may cause endless packet loops */
1320 if (r->min_ttl && h->ip6_hlim < r->min_ttl)
1321 h->ip6_hlim = r->min_ttl;
1322
1323 return (PF_PASS);
1324
1325 fragment:
1326 if (ntohs(h->ip6_plen) == 0 || jumbolen)
1327 goto drop;
1328 plen = ntohs(h->ip6_plen);
1329
1330 if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1331 goto shortpkt;
1332 fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
1333 if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
1334 goto badfrag;
1335
1336	/* Offset and length check out; IPv6 fragments are not reassembled here, so pass it on */
1337 return (PF_PASS);
1338
1339 shortpkt:
1340 REASON_SET(reason, PFRES_SHORT);
1341 if (r != NULL && r->log)
1342 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1343 return (PF_DROP);
1344
1345 drop:
1346 REASON_SET(reason, PFRES_NORM);
1347 if (r != NULL && r->log)
1348 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1349 return (PF_DROP);
1350
1351 badfrag:
1352 REASON_SET(reason, PFRES_FRAG);
1353 if (r != NULL && r->log)
1354 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1355 return (PF_DROP);
1356}
1357#endif
1358
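/*
 * Normalize a TCP segment against the matching scrub rule: drop illegal
 * flag combinations, clear the reserved header bits and any stale urgent
 * pointer, and clamp the MSS option, patching the checksum incrementally
 * for each rewrite.
 */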
1359int
1360pf_normalize_tcp(int dir, struct ifnet *ifp, struct mbuf *m, int ipoff,
1361 int off, void *h, struct pf_pdesc *pd)
1362{
1363 struct pf_rule *r, *rm = NULL;
1364 struct tcphdr *th = pd->hdr.tcp;
1365 int rewrite = 0;
1366 u_short reason;
1367 u_int8_t flags;
1368 sa_family_t af = pd->af;
1369
1370 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1371 while (r != NULL) {
1372 r->evaluations++;
1373 if (r->ifp != NULL && r->ifp != ifp)
1374 r = r->skip[PF_SKIP_IFP].ptr;
1375 else if (r->direction && r->direction != dir)
1376 r = r->skip[PF_SKIP_DIR].ptr;
1377 else if (r->af && r->af != af)
1378 r = r->skip[PF_SKIP_AF].ptr;
1379 else if (r->proto && r->proto != pd->proto)
1380 r = r->skip[PF_SKIP_PROTO].ptr;
1381 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
1382 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1383 else if (r->src.port_op && !pf_match_port(r->src.port_op,
1384 r->src.port[0], r->src.port[1], th->th_sport))
1385 r = r->skip[PF_SKIP_SRC_PORT].ptr;
1386 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
1387 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1388 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1389 r->dst.port[0], r->dst.port[1], th->th_dport))
1390 r = r->skip[PF_SKIP_DST_PORT].ptr;
1391 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1392 pf_osfp_fingerprint(pd, m, off, th),
1393 r->os_fingerprint))
1394 r = TAILQ_NEXT(r, entries);
1395 else {
1396 rm = r;
1397 break;
1398 }
1399 }
1400
1401 if (rm == NULL)
1402 return (PF_PASS);
1403 else
1404 r->packets++;
1405
1406 if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1407 pd->flags |= PFDESC_TCP_NORM;
1408
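	/*
	 * Flag sanity: SYN+RST never occurs legitimately, SYN+FIN is
	 * normalized to plain SYN, every segment must carry at least one
	 * of SYN, ACK or RST, and FIN/PUSH/URG are meaningless without ACK.
	 */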
1409 flags = th->th_flags;
1410 if (flags & TH_SYN) {
1411 /* Illegal packet */
1412 if (flags & TH_RST)
1413 goto tcp_drop;
1414
1415 if (flags & TH_FIN)
1416 flags &= ~TH_FIN;
1417 } else {
1418 /* Illegal packet */
1419 if (!(flags & (TH_ACK|TH_RST)))
1420 goto tcp_drop;
1421 }
1422
1423 if (!(flags & TH_ACK)) {
1424 /* These flags are only valid if ACK is set */
1425 if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1426 goto tcp_drop;
1427 }
1428
1429 /* Check for illegal header length */
1430 if (th->th_off < (sizeof(struct tcphdr) >> 2))
1431 goto tcp_drop;
1432
1433 /* If flags changed, or reserved data set, then adjust */
1434 if (flags != th->th_flags || th->th_x2 != 0) {
1435 u_int16_t ov, nv;
1436
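		/*
		 * &th->th_ack + 1 addresses the 16-bit word holding
		 * th_off, th_x2 and th_flags; sampling it before and after
		 * the rewrite lets pf_cksum_fixup() patch the checksum
		 * incrementally.
		 */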
1437 ov = *(u_int16_t *)(&th->th_ack + 1);
1438 th->th_flags = flags;
1439 th->th_x2 = 0;
1440 nv = *(u_int16_t *)(&th->th_ack + 1);
1441
1442 th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
1443 rewrite = 1;
1444 }
1445
1446 /* Remove urgent pointer, if TH_URG is not set */
1447 if (!(flags & TH_URG) && th->th_urp) {
1448 th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
1449 th->th_urp = 0;
1450 rewrite = 1;
1451 }
1452
1453 /* Process options */
1454 if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
1455 rewrite = 1;
1456
1457 /* copy back packet headers if we sanitized */
1458 if (rewrite)
1459 m_copyback(m, off, sizeof(*th), (caddr_t)th);
1460
1461 return (PF_PASS);
1462
1463 tcp_drop:
1464 REASON_SET(&reason, PFRES_NORM);
1465 if (rm != NULL && r->log)
1466 PFLOG_PACKET(ifp, h, m, AF_INET, dir, reason, r, NULL, NULL);
1467 return (PF_DROP);
1468}
1469
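/*
 * Allocate per-peer scrub state when a connection is first tracked:
 * record the initial TTL and, on the SYN, look for a timestamp option
 * and seed a random per-connection timestamp modulator.
 */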
1470int
1471pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1472 struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1473{
1474 u_int8_t hdr[60];
1475 u_int8_t *opt;
1476
1477#if defined(__FreeBSD__)
1478 KASSERT((src->scrub == NULL),
1479 ("pf_normalize_tcp_init: src->scrub != NULL"));
1480#else
1481 KASSERT(src->scrub == NULL);
1482#endif
1483
1484 src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
1485 if (src->scrub == NULL)
1486 return (1);
1487 bzero(src->scrub, sizeof(*src->scrub));
1488
1489 switch (pd->af) {
1490#ifdef INET
1491 case AF_INET: {
1492 struct ip *h = mtod(m, struct ip *);
1493 src->scrub->pfss_ttl = h->ip_ttl;
1494 break;
1495 }
1496#endif /* INET */
1497#ifdef INET6
1498 case AF_INET6: {
1499 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1500 src->scrub->pfss_ttl = h->ip6_hlim;
1501 break;
1502 }
1503#endif /* INET6 */
1504 }
1505
1506
1507 /*
1508	 * All normalizations below are only begun if we see the start of
1509	 * the connection; each must set an enabled bit in pfss_flags.
1510 */
1511 if ((th->th_flags & TH_SYN) == 0)
1512		return (0);
1513
1514
1515 if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1516 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1517 /* Diddle with TCP options */
1518 int hlen;
1519 opt = hdr + sizeof(struct tcphdr);
1520 hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1521 while (hlen >= TCPOLEN_TIMESTAMP) {
1522 switch (*opt) {
1523 case TCPOPT_EOL: /* FALLTHROUGH */
1524 case TCPOPT_NOP:
1525 opt++;
1526 hlen--;
1527 break;
1528 case TCPOPT_TIMESTAMP:
1529 if (opt[1] >= TCPOLEN_TIMESTAMP) {
1530 src->scrub->pfss_flags |=
1531 PFSS_TIMESTAMP;
1532 src->scrub->pfss_ts_mod = arc4random();
1533 }
1534 /* FALLTHROUGH */
1535 default:
1536				hlen -= MAX(opt[1], 2);	/* len 0 would loop forever */
1537				opt += MAX(opt[1], 2);
1538 break;
1539 }
1540 }
1541 }
1542
1543 return (0);
1544}
1545
1546void
1547pf_normalize_tcp_cleanup(struct pf_state *state)
1548{
1549 if (state->src.scrub)
1550 pool_put(&pf_state_scrub_pl, state->src.scrub);
1551 if (state->dst.scrub)
1552 pool_put(&pf_state_scrub_pl, state->dst.scrub);
1553
1554 /* Someday... flush the TCP segment reassembly descriptors. */
1555}
1556
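/*
 * Stateful normalization for an established connection: pin the TTL to
 * the maximum seen so far (defeating short-TTL IDS evasion) and modulate
 * TCP timestamps in both directions; *writeback is set when the options
 * were rewritten.
 */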
1557int
1558pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1559 u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
1560 struct pf_state_peer *dst, int *writeback)
1561{
1562 u_int8_t hdr[60];
1563 u_int8_t *opt;
1564 int copyback = 0;
1565
1566#if defined(__FreeBSD__)
1567 KASSERT((src->scrub || dst->scrub),
1568	    ("pf_normalize_tcp_stateful: neither src->scrub nor dst->scrub set"));
1569#else
1570 KASSERT(src->scrub || dst->scrub);
1571#endif
1572
1573 /*
1574 * Enforce the minimum TTL seen for this connection. Negate a common
1575 * technique to evade an intrusion detection system and confuse
1576 * firewall state code.
1577 */
1578 switch (pd->af) {
1579#ifdef INET
1580 case AF_INET: {
1581 if (src->scrub) {
1582 struct ip *h = mtod(m, struct ip *);
1583 if (h->ip_ttl > src->scrub->pfss_ttl)
1584 src->scrub->pfss_ttl = h->ip_ttl;
1585 h->ip_ttl = src->scrub->pfss_ttl;
1586 }
1587 break;
1588 }
1589#endif /* INET */
1590#ifdef INET6
1591 case AF_INET6: {
1592		if (src->scrub) {
1593 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1594 if (h->ip6_hlim > src->scrub->pfss_ttl)
1595 src->scrub->pfss_ttl = h->ip6_hlim;
1596 h->ip6_hlim = src->scrub->pfss_ttl;
1597 }
1598 break;
1599 }
1600#endif /* INET6 */
1601 }
1602
1603 if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1604 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1605 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1606 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1607 /* Diddle with TCP options */
1608 int hlen;
1609 opt = hdr + sizeof(struct tcphdr);
1610 hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1611 while (hlen >= TCPOLEN_TIMESTAMP) {
1612 switch (*opt) {
1613 case TCPOPT_EOL: /* FALLTHROUGH */
1614 case TCPOPT_NOP:
1615 opt++;
1616 hlen--;
1617 break;
1618 case TCPOPT_TIMESTAMP:
1619 /* Modulate the timestamps. Can be used for
1620 * NAT detection, OS uptime determination or
1621 * reboot detection.
1622 */
1623 if (opt[1] >= TCPOLEN_TIMESTAMP) {
1624 u_int32_t ts_value;
1625 if (src->scrub &&
1626 (src->scrub->pfss_flags &
1627 PFSS_TIMESTAMP)) {
1628 memcpy(&ts_value, &opt[2],
1629 sizeof(u_int32_t));
1630 ts_value = htonl(ntohl(ts_value)
1631 + src->scrub->pfss_ts_mod);
1632 pf_change_a(&opt[2],
1633 &th->th_sum, ts_value, 0);
1634 copyback = 1;
1635 }
1636 if (dst->scrub &&
1637 (dst->scrub->pfss_flags &
1638 PFSS_TIMESTAMP)) {
1639 memcpy(&ts_value, &opt[6],
1640 sizeof(u_int32_t));
1641 ts_value = htonl(ntohl(ts_value)
1642 - dst->scrub->pfss_ts_mod);
1643 pf_change_a(&opt[6],
1644 &th->th_sum, ts_value, 0);
1645 copyback = 1;
1646 }
1647 }
1648 /* FALLTHROUGH */
1649 default:
1650				hlen -= MAX(opt[1], 2);	/* len 0 would loop forever */
1651				opt += MAX(opt[1], 2);
1652 break;
1653 }
1654 }
1655 if (copyback) {
1656			/* Copy back the options; the caller copies back the header */
1657 *writeback = 1;
1658 m_copyback(m, off + sizeof(struct tcphdr),
1659 (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1660 sizeof(struct tcphdr));
1661 }
1662 }
1663
1664
1665 /* I have a dream.... TCP segment reassembly.... */
1666 return (0);
1667}
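
/*
 * Walk the TCP options and clamp any MSS larger than the rule's max-mss,
 * fixing up the checksum; returns nonzero if the segment was rewritten.
 */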
1668int
1669pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
1670 int off)
1671{
1672 u_int16_t *mss;
1673 int thoff;
1674 int opt, cnt, optlen = 0;
1675 int rewrite = 0;
1676 u_char *optp;
1677
1678 thoff = th->th_off << 2;
1679 cnt = thoff - sizeof(struct tcphdr);
1680 optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);
1681
1682 for (; cnt > 0; cnt -= optlen, optp += optlen) {
1683 opt = optp[0];
1684 if (opt == TCPOPT_EOL)
1685 break;
1686 if (opt == TCPOPT_NOP)
1687 optlen = 1;
1688 else {
1689 if (cnt < 2)
1690 break;
1691 optlen = optp[1];
1692 if (optlen < 2 || optlen > cnt)
1693 break;
1694 }
1695 switch (opt) {
1696 case TCPOPT_MAXSEG:
1697 mss = (u_int16_t *)(optp + 2);
1698 if ((ntohs(*mss)) > r->max_mss) {
1699 th->th_sum = pf_cksum_fixup(th->th_sum,
1700 *mss, htons(r->max_mss));
1701 *mss = htons(r->max_mss);
1702 rewrite = 1;
1703 }
1704 break;
1705 default:
1706 break;
1707 }
1708 }
1709
1710 return (rewrite);
1711}