frag6.c (290471) vs. frag6.c (293470)
1/*-
2 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the project nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/netinet6/frag6.c 290471 2015-11-06 23:07:43Z adrian $");
33__FBSDID("$FreeBSD: head/sys/netinet6/frag6.c 293470 2016-01-09 09:34:39Z melifaro $");
34
35#include "opt_rss.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/malloc.h>
40#include <sys/mbuf.h>
41#include <sys/domain.h>
42#include <sys/eventhandler.h>
43#include <sys/protosw.h>
44#include <sys/socket.h>
45#include <sys/errno.h>
46#include <sys/time.h>
47#include <sys/kernel.h>
48#include <sys/syslog.h>
49
50#include <net/if.h>
51#include <net/if_var.h>
52#include <net/netisr.h>
53#include <net/route.h>
54#include <net/vnet.h>
55
56#include <netinet/in.h>
57#include <netinet/in_var.h>
58#include <netinet/ip6.h>
59#include <netinet6/ip6_var.h>
60#include <netinet/icmp6.h>
61#include <netinet/in_systm.h> /* for ECN definitions */
62#include <netinet/ip.h> /* for ECN definitions */
63
64#include <security/mac/mac_framework.h>
65
66static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
67static void frag6_deq(struct ip6asfrag *);
68static void frag6_insque(struct ip6q *, struct ip6q *);
69static void frag6_remque(struct ip6q *);
70static void frag6_freef(struct ip6q *);
71
72static struct mtx ip6qlock;
73/*
 74 * These fields are all protected by ip6qlock.
75 */
76static VNET_DEFINE(u_int, frag6_nfragpackets);
77static VNET_DEFINE(u_int, frag6_nfrags);
78static VNET_DEFINE(struct ip6q, ip6q); /* ip6 reassemble queue */
79
80#define V_frag6_nfragpackets VNET(frag6_nfragpackets)
81#define V_frag6_nfrags VNET(frag6_nfrags)
82#define V_ip6q VNET(ip6q)
83
84#define IP6Q_LOCK_INIT() mtx_init(&ip6qlock, "ip6qlock", NULL, MTX_DEF);
85#define IP6Q_LOCK() mtx_lock(&ip6qlock)
86#define IP6Q_TRYLOCK() mtx_trylock(&ip6qlock)
87#define IP6Q_LOCK_ASSERT() mtx_assert(&ip6qlock, MA_OWNED)
88#define IP6Q_UNLOCK() mtx_unlock(&ip6qlock)
89
90static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
91
92/*
93 * Initialise reassembly queue and fragment identifier.
94 */
95static void
96frag6_change(void *tag)
97{
98
99 V_ip6_maxfragpackets = nmbclusters / 4;
100 V_ip6_maxfrags = nmbclusters / 4;
101}
102
103void
104frag6_init(void)
105{
106
107 V_ip6_maxfragpackets = nmbclusters / 4;
108 V_ip6_maxfrags = nmbclusters / 4;
109 V_ip6q.ip6q_next = V_ip6q.ip6q_prev = &V_ip6q;
110
111 if (!IS_DEFAULT_VNET(curvnet))
112 return;
113
114 EVENTHANDLER_REGISTER(nmbclusters_change,
115 frag6_change, NULL, EVENTHANDLER_PRI_ANY);
116
117 IP6Q_LOCK_INIT();
118}
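/*
 * Both limits above scale with nmbclusters and are re-evaluated by
 * frag6_change() through the nmbclusters_change event handler registered
 * in frag6_init().  EVENTHANDLER_REGISTER() is declared in
 * <sys/eventhandler.h>, presumably the reason this revision includes that
 * header directly.
 */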
119
120/*
 121 * In RFC 2460, the fragment and reassembly rules do not agree with each
 122 * other in terms of next header field handling in the fragment header.
 123 * While the sender will use the same value for all of the fragments,
 124 * the receiver is advised not to check for consistency.
125 *
126 * fragment rule (p20):
127 * (2) A Fragment header containing:
128 * The Next Header value that identifies the first header of
129 * the Fragmentable Part of the original packet.
130 * -> next header field is same for all fragments
131 *
132 * reassembly rule (p21):
133 * The Next Header field of the last header of the Unfragmentable
134 * Part is obtained from the Next Header field of the first
135 * fragment's Fragment header.
136 * -> should grab it from the first fragment only
137 *
 138 * The following note also contradicts the fragment rule - no one is going
 139 * to send fragments of the same packet with different next header fields.
140 *
141 * additional note (p22):
142 * The Next Header values in the Fragment headers of different
143 * fragments of the same original packet may differ. Only the value
144 * from the Offset zero fragment packet is used for reassembly.
145 * -> should grab it from the first fragment only
146 *
147 * There is no explicit reason given in the RFC. Historical reason maybe?
148 */
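/*
 * Illustrative sketch (not part of the build): the rules above boil down
 * to "only the offset-zero fragment's Next Header value matters".  The
 * helper name below is hypothetical; frag6_input() implements the same
 * logic inline via q6->ip6q_nxt.
 */
#if 0
static u_int8_t
frag6_pick_nxt(u_int8_t cur_nxt, const struct ip6_frag *ip6f)
{

	/* The offset-zero fragment supplies the Next Header value. */
	if ((ip6f->ip6f_offlg & IP6F_OFF_MASK) == 0)
		return (ip6f->ip6f_nxt);
	/* Later fragments may carry a different value; it is ignored. */
	return (cur_nxt);
}
#endif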
149/*
150 * Fragment input
151 */
152int
153frag6_input(struct mbuf **mp, int *offp, int proto)
154{
155 struct mbuf *m = *mp, *t;
156 struct ip6_hdr *ip6;
157 struct ip6_frag *ip6f;
158 struct ip6q *q6;
159 struct ip6asfrag *af6, *ip6af, *af6dwn;
160 struct in6_ifaddr *ia;
161 int offset = *offp, nxt, i, next;
162 int first_frag = 0;
163 int fragoff, frgpartlen; /* must be larger than u_int16_t */
164 struct ifnet *dstifp;
165 u_int8_t ecn, ecn0;
166#ifdef RSS
167 struct m_tag *mtag;
168 struct ip6_direct_ctx *ip6dc;
169#endif
170
171#if 0
172 char ip6buf[INET6_ADDRSTRLEN];
173#endif
174
175 ip6 = mtod(m, struct ip6_hdr *);
176#ifndef PULLDOWN_TEST
177 IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
178 ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
179#else
180 IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
181 if (ip6f == NULL)
182 return (IPPROTO_DONE);
183#endif
184
185 dstifp = NULL;
186 /* find the destination interface of the packet. */
187 ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
188 if (ia != NULL) {
189 dstifp = ia->ia_ifp;
190 ifa_free(&ia->ia_ifa);
191 }
192 /* jumbo payload can't contain a fragment header */
193 if (ip6->ip6_plen == 0) {
194 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
195 in6_ifstat_inc(dstifp, ifs6_reass_fail);
196 return IPPROTO_DONE;
197 }
198
 199	/*
 200	 * Check whether the fragment's payload length is a multiple of 8
 201	 * octets, as required for any fragment with the M (more) bit set.
 202	 * Since sizeof(struct ip6_frag) == 8 and sizeof(struct ip6_hdr) == 40,
 203	 * (ip6_plen - offset) is a multiple of 8 iff the payload is.
 204	 */
205 if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
206 (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
207 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
208 offsetof(struct ip6_hdr, ip6_plen));
209 in6_ifstat_inc(dstifp, ifs6_reass_fail);
210 return IPPROTO_DONE;
211 }
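	/*
	 * Worked example for the check above (illustrative numbers): with
	 * no extension headers before the Fragment header, offset == 40
	 * here and ip6_plen == 8 + payload length (the data after the
	 * Fragment header), so ip6_plen - offset == payload - 32, which is
	 * a multiple of 8 exactly when the payload is.  A 1000-byte payload
	 * passes (1008 - 40 == 968 == 121 * 8); a 1001-byte non-final
	 * fragment fails (969 & 0x7 == 1) and draws the parameter problem
	 * error above.
	 */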
212
213 IP6STAT_INC(ip6s_fragments);
214 in6_ifstat_inc(dstifp, ifs6_reass_reqd);
215
216 /* offset now points to data portion */
217 offset += sizeof(struct ip6_frag);
218
219 /*
220 * RFC 6946: Handle "atomic" fragments (offset and m bit set to 0)
221 * upfront, unrelated to any reassembly. Just skip the fragment header.
222 */
223 if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
224 /* XXX-BZ we want dedicated counters for this. */
225 IP6STAT_INC(ip6s_reassembled);
226 in6_ifstat_inc(dstifp, ifs6_reass_ok);
227 *offp = offset;
228 return (ip6f->ip6f_nxt);
229 }
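	/*
	 * Illustrative sketch (not compiled): an "atomic" fragment is a
	 * packet carrying a Fragment header although it was never split,
	 * i.e. offset == 0 and the M bit clear, so only the reserved bits
	 * of ip6f_offlg may be non-zero:
	 */
#if 0
	{
		struct ip6_frag atomic;

		atomic.ip6f_nxt = IPPROTO_UDP;		/* example payload */
		atomic.ip6f_reserved = 0;
		atomic.ip6f_offlg = 0;			/* offset 0, M clear */
		atomic.ip6f_ident = htonl(0x12345678);	/* arbitrary ident */
		/* (atomic.ip6f_offlg & ~IP6F_RESERVED_MASK) == 0 holds. */
	}
#endif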
230
231 IP6Q_LOCK();
232
233 /*
234 * Enforce upper bound on number of fragments.
235 * If maxfrag is 0, never accept fragments.
236 * If maxfrag is -1, accept all fragments without limitation.
237 */
238 if (V_ip6_maxfrags < 0)
239 ;
240 else if (V_frag6_nfrags >= (u_int)V_ip6_maxfrags)
241 goto dropfrag;
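	/*
	 * Note: ip6_maxfrags and ip6_maxfragpackets are runtime tunables;
	 * in FreeBSD they are normally exposed as the sysctls
	 * net.inet6.ip6.maxfrags and net.inet6.ip6.maxfragpackets (sysctl
	 * names assumed here), with the 0 / -1 semantics described above.
	 */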
242
243 for (q6 = V_ip6q.ip6q_next; q6 != &V_ip6q; q6 = q6->ip6q_next)
244 if (ip6f->ip6f_ident == q6->ip6q_ident &&
245 IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
246 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
247#ifdef MAC
248 && mac_ip6q_match(m, q6)
249#endif
250 )
251 break;
252
253 if (q6 == &V_ip6q) {
254 /*
 255	 * This is the first fragment to arrive; create a reassembly queue.
256 */
257 first_frag = 1;
258
259 /*
260 * Enforce upper bound on number of fragmented packets
261 * for which we attempt reassembly;
262 * If maxfragpackets is 0, never accept fragments.
263 * If maxfragpackets is -1, accept all fragments without
264 * limitation.
265 */
266 if (V_ip6_maxfragpackets < 0)
267 ;
268 else if (V_frag6_nfragpackets >= (u_int)V_ip6_maxfragpackets)
269 goto dropfrag;
270 V_frag6_nfragpackets++;
271 q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
272 M_NOWAIT);
273 if (q6 == NULL)
274 goto dropfrag;
275 bzero(q6, sizeof(*q6));
276#ifdef MAC
277 if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
278 free(q6, M_FTABLE);
279 goto dropfrag;
280 }
281 mac_ip6q_create(m, q6);
282#endif
283 frag6_insque(q6, &V_ip6q);
284
285 /* ip6q_nxt will be filled afterwards, from 1st fragment */
286 q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
287#ifdef notyet
288 q6->ip6q_nxtp = (u_char *)nxtp;
289#endif
290 q6->ip6q_ident = ip6f->ip6f_ident;
291 q6->ip6q_ttl = IPV6_FRAGTTL;
292 q6->ip6q_src = ip6->ip6_src;
293 q6->ip6q_dst = ip6->ip6_dst;
294 q6->ip6q_ecn =
295 (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
296 q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
297
298 q6->ip6q_nfrag = 0;
299 }
300
301 /*
302 * If it's the 1st fragment, record the length of the
303 * unfragmentable part and the next header of the fragment header.
304 */
305 fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
306 if (fragoff == 0) {
307 q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
308 sizeof(struct ip6_frag);
309 q6->ip6q_nxt = ip6f->ip6f_nxt;
310 }
311
312 /*
313 * Check that the reassembled packet would not exceed 65535 bytes
314 * in size.
315 * If it would exceed, discard the fragment and return an ICMP error.
316 */
317 frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
318 if (q6->ip6q_unfrglen >= 0) {
319 /* The 1st fragment has already arrived. */
320 if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
321 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
322 offset - sizeof(struct ip6_frag) +
323 offsetof(struct ip6_frag, ip6f_offlg));
324 IP6Q_UNLOCK();
325 return (IPPROTO_DONE);
326 }
327 } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
328 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
329 offset - sizeof(struct ip6_frag) +
330 offsetof(struct ip6_frag, ip6f_offlg));
331 IP6Q_UNLOCK();
332 return (IPPROTO_DONE);
333 }
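	/*
	 * Worked example for the size check above (illustrative numbers):
	 * with no unfragmentable extension headers (ip6q_unfrglen == 0)
	 * and a fragment at the maximum offset of 65528 (8191 * 8), any
	 * payload longer than IPV6_MAXPACKET - 65528 == 7 bytes would push
	 * the reassembled packet past 65535 bytes, so the fragment is
	 * dropped and a parameter problem error pointing at ip6f_offlg is
	 * returned to the sender.
	 */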
334 /*
335 * If it's the first fragment, do the above check for each
336 * fragment already stored in the reassembly queue.
337 */
338 if (fragoff == 0) {
339 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
340 af6 = af6dwn) {
341 af6dwn = af6->ip6af_down;
342
343 if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
344 IPV6_MAXPACKET) {
345 struct mbuf *merr = IP6_REASS_MBUF(af6);
346 struct ip6_hdr *ip6err;
347 int erroff = af6->ip6af_offset;
348
349 /* dequeue the fragment. */
350 frag6_deq(af6);
351 free(af6, M_FTABLE);
352
353 /* adjust pointer. */
354 ip6err = mtod(merr, struct ip6_hdr *);
355
356 /*
357 * Restore source and destination addresses
358 * in the erroneous IPv6 header.
359 */
360 ip6err->ip6_src = q6->ip6q_src;
361 ip6err->ip6_dst = q6->ip6q_dst;
362
363 icmp6_error(merr, ICMP6_PARAM_PROB,
364 ICMP6_PARAMPROB_HEADER,
365 erroff - sizeof(struct ip6_frag) +
366 offsetof(struct ip6_frag, ip6f_offlg));
367 }
368 }
369 }
370
371 ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
372 M_NOWAIT);
373 if (ip6af == NULL)
374 goto dropfrag;
375 bzero(ip6af, sizeof(*ip6af));
376 ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
377 ip6af->ip6af_off = fragoff;
378 ip6af->ip6af_frglen = frgpartlen;
379 ip6af->ip6af_offset = offset;
380 IP6_REASS_MBUF(ip6af) = m;
381
382 if (first_frag) {
383 af6 = (struct ip6asfrag *)q6;
384 goto insert;
385 }
386
387 /*
388 * Handle ECN by comparing this segment with the first one;
389 * if CE is set, do not lose CE.
390 * drop if CE and not-ECT are mixed for the same packet.
391 */
392 ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
393 ecn0 = q6->ip6q_ecn;
394 if (ecn == IPTOS_ECN_CE) {
395 if (ecn0 == IPTOS_ECN_NOTECT) {
396 free(ip6af, M_FTABLE);
397 goto dropfrag;
398 }
399 if (ecn0 != IPTOS_ECN_CE)
400 q6->ip6q_ecn = IPTOS_ECN_CE;
401 }
402 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
403 free(ip6af, M_FTABLE);
404 goto dropfrag;
405 }
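	/*
	 * Summary of the ECN folding above (cf. RFC 3168), comparing the
	 * arriving fragment (ecn) with the queue state (ecn0):
	 *	CE arriving,      Not-ECT stored   -> drop fragment
	 *	CE arriving,      ECT stored       -> queue state becomes CE
	 *	Not-ECT arriving, ECT or CE stored -> drop fragment
	 *	any other combination              -> keep, state unchanged
	 */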
406
407 /*
408 * Find a segment which begins after this one does.
409 */
410 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
411 af6 = af6->ip6af_down)
412 if (af6->ip6af_off > ip6af->ip6af_off)
413 break;
414
415#if 0
416 /*
417 * If there is a preceding segment, it may provide some of
418 * our data already. If so, drop the data from the incoming
419 * segment. If it provides all of our data, drop us.
420 */
421 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
422 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
423 - ip6af->ip6af_off;
424 if (i > 0) {
425 if (i >= ip6af->ip6af_frglen)
426 goto dropfrag;
427 m_adj(IP6_REASS_MBUF(ip6af), i);
428 ip6af->ip6af_off += i;
429 ip6af->ip6af_frglen -= i;
430 }
431 }
432
433 /*
434 * While we overlap succeeding segments trim them or,
435 * if they are completely covered, dequeue them.
436 */
437 while (af6 != (struct ip6asfrag *)q6 &&
438 ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
439 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
440 if (i < af6->ip6af_frglen) {
441 af6->ip6af_frglen -= i;
442 af6->ip6af_off += i;
443 m_adj(IP6_REASS_MBUF(af6), i);
444 break;
445 }
446 af6 = af6->ip6af_down;
447 m_freem(IP6_REASS_MBUF(af6->ip6af_up));
448 frag6_deq(af6->ip6af_up);
449 }
450#else
451 /*
 452	 * If the incoming fragment overlaps some existing fragments in
 453	 * the reassembly queue, drop it, since it is dangerous to overwrite
 454	 * existing fragments from a security point of view.
 455	 * We don't know which fragment is the bad guy - here we trust the
 456	 * fragment that arrived earlier, with no particular reason.
457 *
458 * Note: due to changes after disabling this part, mbuf passed to
459 * m_adj() below now does not meet the requirement.
460 */
461 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
462 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
463 - ip6af->ip6af_off;
464 if (i > 0) {
465#if 0 /* suppress the noisy log */
466 log(LOG_ERR, "%d bytes of a fragment from %s "
467 "overlaps the previous fragment\n",
468 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
469#endif
470 free(ip6af, M_FTABLE);
471 goto dropfrag;
472 }
473 }
474 if (af6 != (struct ip6asfrag *)q6) {
475 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
476 if (i > 0) {
477#if 0 /* suppress the noisy log */
478 log(LOG_ERR, "%d bytes of a fragment from %s "
479 "overlaps the succeeding fragment",
480 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
481#endif
482 free(ip6af, M_FTABLE);
483 goto dropfrag;
484 }
485 }
486#endif
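	/*
	 * Worked example for the overlap checks above (illustrative
	 * numbers): if an already-queued fragment covers bytes 0-1231
	 * (off 0, len 1232) and a new fragment claims offset 1224, they
	 * overlap by i == 1232 - 1224 == 8 bytes and the newcomer is
	 * dropped rather than being allowed to overwrite queued data.
	 */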
487
488insert:
489#ifdef MAC
490 if (!first_frag)
491 mac_ip6q_update(m, q6);
492#endif
493
494 /*
495 * Stick new segment in its place;
496 * check for complete reassembly.
497 * Move to front of packet queue, as we are
498 * the most recently active fragmented packet.
499 */
500 frag6_enq(ip6af, af6->ip6af_up);
501 V_frag6_nfrags++;
502 q6->ip6q_nfrag++;
503#if 0 /* xxx */
504 if (q6 != V_ip6q.ip6q_next) {
505 frag6_remque(q6);
506 frag6_insque(q6, &V_ip6q);
507 }
508#endif
509 next = 0;
510 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
511 af6 = af6->ip6af_down) {
512 if (af6->ip6af_off != next) {
513 IP6Q_UNLOCK();
514 return IPPROTO_DONE;
515 }
516 next += af6->ip6af_frglen;
517 }
518 if (af6->ip6af_up->ip6af_mff) {
519 IP6Q_UNLOCK();
520 return IPPROTO_DONE;
521 }
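	/*
	 * Illustrative completeness check (hypothetical sizes): fragments
	 * queued as (off 0, len 1232), (off 1232, len 1232) and
	 * (off 2464, len 400, M bit clear) pass both tests above - each
	 * offset equals the running "next" total and the last fragment has
	 * no more-fragments bit - so reassembly proceeds below.  Any hole,
	 * or a trailing fragment with the M bit still set, just returns
	 * IPPROTO_DONE and waits for more fragments to arrive.
	 */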
522
523 /*
524 * Reassembly is complete; concatenate fragments.
525 */
526 ip6af = q6->ip6q_down;
527 t = m = IP6_REASS_MBUF(ip6af);
528 af6 = ip6af->ip6af_down;
529 frag6_deq(ip6af);
530 while (af6 != (struct ip6asfrag *)q6) {
531 af6dwn = af6->ip6af_down;
532 frag6_deq(af6);
533 while (t->m_next)
534 t = t->m_next;
535 m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
536 m_cat(t, IP6_REASS_MBUF(af6));
537 free(af6, M_FTABLE);
538 af6 = af6dwn;
539 }
540
541 /* adjust offset to point where the original next header starts */
542 offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
543 free(ip6af, M_FTABLE);
544 ip6 = mtod(m, struct ip6_hdr *);
545 ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
546 if (q6->ip6q_ecn == IPTOS_ECN_CE)
547 ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
548 nxt = q6->ip6q_nxt;
549#ifdef notyet
550 *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
551#endif
552
553 if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
554 frag6_remque(q6);
555 V_frag6_nfrags -= q6->ip6q_nfrag;
556#ifdef MAC
557 mac_ip6q_destroy(q6);
558#endif
559 free(q6, M_FTABLE);
560 V_frag6_nfragpackets--;
561
562 goto dropfrag;
563 }
564
565 /*
566 * Store NXT to the original.
567 */
568 {
569 char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
570 *prvnxtp = nxt;
571 }
572
573 frag6_remque(q6);
574 V_frag6_nfrags -= q6->ip6q_nfrag;
575#ifdef MAC
576 mac_ip6q_reassemble(q6, m);
577 mac_ip6q_destroy(q6);
578#endif
579 free(q6, M_FTABLE);
580 V_frag6_nfragpackets--;
581
582 if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
583 int plen = 0;
584 for (t = m; t; t = t->m_next)
585 plen += t->m_len;
586 m->m_pkthdr.len = plen;
587 }
588
589#ifdef RSS
590 mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
591 M_NOWAIT);
592 if (mtag == NULL)
593 goto dropfrag;
594
595 ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
596 ip6dc->ip6dc_nxt = nxt;
597 ip6dc->ip6dc_off = offset;
598
599 m_tag_prepend(m, mtag);
600#endif
601
602 IP6Q_UNLOCK();
603 IP6STAT_INC(ip6s_reassembled);
604 in6_ifstat_inc(dstifp, ifs6_reass_ok);
605
606#ifdef RSS
607 /*
608 * Queue/dispatch for reprocessing.
609 */
610 netisr_dispatch(NETISR_IPV6_DIRECT, m);
611 return IPPROTO_DONE;
612#endif
613
614 /*
615 * Tell launch routine the next header
616 */
617
618 *mp = m;
619 *offp = offset;
620
621 return nxt;
622
623 dropfrag:
624 IP6Q_UNLOCK();
625 in6_ifstat_inc(dstifp, ifs6_reass_fail);
626 IP6STAT_INC(ip6s_fragdropped);
627 m_freem(m);
628 return IPPROTO_DONE;
629}
630
631/*
632 * Free a fragment reassembly header and all
633 * associated datagrams.
634 */
635void
636frag6_freef(struct ip6q *q6)
637{
638 struct ip6asfrag *af6, *down6;
639
640 IP6Q_LOCK_ASSERT();
641
642 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
643 af6 = down6) {
644 struct mbuf *m = IP6_REASS_MBUF(af6);
645
646 down6 = af6->ip6af_down;
647 frag6_deq(af6);
648
649 /*
650 * Return ICMP time exceeded error for the 1st fragment.
651 * Just free other fragments.
652 */
653 if (af6->ip6af_off == 0) {
654 struct ip6_hdr *ip6;
655
656 /* adjust pointer */
657 ip6 = mtod(m, struct ip6_hdr *);
658
659 /* restore source and destination addresses */
660 ip6->ip6_src = q6->ip6q_src;
661 ip6->ip6_dst = q6->ip6q_dst;
662
663 icmp6_error(m, ICMP6_TIME_EXCEEDED,
664 ICMP6_TIME_EXCEED_REASSEMBLY, 0);
665 } else
666 m_freem(m);
667 free(af6, M_FTABLE);
668 }
669 frag6_remque(q6);
670 V_frag6_nfrags -= q6->ip6q_nfrag;
671#ifdef MAC
672 mac_ip6q_destroy(q6);
673#endif
674 free(q6, M_FTABLE);
675 V_frag6_nfragpackets--;
676}
677
678/*
679 * Put an ip fragment on a reassembly chain.
680 * Like insque, but pointers in middle of structure.
681 */
682void
683frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
684{
685
686 IP6Q_LOCK_ASSERT();
687
688 af6->ip6af_up = up6;
689 af6->ip6af_down = up6->ip6af_down;
690 up6->ip6af_down->ip6af_up = af6;
691 up6->ip6af_down = af6;
692}
693
694/*
695 * To frag6_enq as remque is to insque.
696 */
697void
698frag6_deq(struct ip6asfrag *af6)
699{
700
701 IP6Q_LOCK_ASSERT();
702
703 af6->ip6af_up->ip6af_down = af6->ip6af_down;
704 af6->ip6af_down->ip6af_up = af6->ip6af_up;
705}
706
707void
708frag6_insque(struct ip6q *new, struct ip6q *old)
709{
710
711 IP6Q_LOCK_ASSERT();
712
713 new->ip6q_prev = old;
714 new->ip6q_next = old->ip6q_next;
715 old->ip6q_next->ip6q_prev= new;
716 old->ip6q_next = new;
717}
718
719void
720frag6_remque(struct ip6q *p6)
721{
722
723 IP6Q_LOCK_ASSERT();
724
725 p6->ip6q_prev->ip6q_next = p6->ip6q_next;
726 p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
727}
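/*
 * Illustrative note: the reassembly state forms two levels of circular
 * doubly-linked lists.  V_ip6q is a sentinel whose ip6q_next/ip6q_prev
 * pointers chain the per-packet ip6q entries (frag6_insque/frag6_remque),
 * and each ip6q in turn acts as the sentinel for its own chain of
 * ip6asfrag entries through ip6af_up/ip6af_down (frag6_enq/frag6_deq) -
 * hence the casts between struct ip6q * and struct ip6asfrag * elsewhere
 * in this file.  An empty per-packet chain points back at its own head:
 *
 *	q6->ip6q_down == q6->ip6q_up == (struct ip6asfrag *)q6
 */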
728
729/*
730 * IPv6 reassembling timer processing;
731 * if a timer expires on a reassembly
732 * queue, discard it.
733 */
734void
735frag6_slowtimo(void)
736{
737 VNET_ITERATOR_DECL(vnet_iter);
738 struct ip6q *q6;
739
740 VNET_LIST_RLOCK_NOSLEEP();
741 IP6Q_LOCK();
742 VNET_FOREACH(vnet_iter) {
743 CURVNET_SET(vnet_iter);
744 q6 = V_ip6q.ip6q_next;
745 if (q6)
746 while (q6 != &V_ip6q) {
747 --q6->ip6q_ttl;
748 q6 = q6->ip6q_next;
749 if (q6->ip6q_prev->ip6q_ttl == 0) {
750 IP6STAT_INC(ip6s_fragtimeout);
751 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
752 frag6_freef(q6->ip6q_prev);
753 }
754 }
755 /*
756 * If we are over the maximum number of fragments
757 * (due to the limit being lowered), drain off
758 * enough to get down to the new limit.
759 */
760 while (V_frag6_nfragpackets > (u_int)V_ip6_maxfragpackets &&
761 V_ip6q.ip6q_prev) {
762 IP6STAT_INC(ip6s_fragoverflow);
763 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
764 frag6_freef(V_ip6q.ip6q_prev);
765 }
766 CURVNET_RESTORE();
767 }
768 IP6Q_UNLOCK();
769 VNET_LIST_RUNLOCK_NOSLEEP();
770}
771
772/*
773 * Drain off all datagram fragments.
774 */
775void
776frag6_drain(void)
777{
778 VNET_ITERATOR_DECL(vnet_iter);
779
780 VNET_LIST_RLOCK_NOSLEEP();
781 if (IP6Q_TRYLOCK() == 0) {
782 VNET_LIST_RUNLOCK_NOSLEEP();
783 return;
784 }
785 VNET_FOREACH(vnet_iter) {
786 CURVNET_SET(vnet_iter);
787 while (V_ip6q.ip6q_next != &V_ip6q) {
788 IP6STAT_INC(ip6s_fragdropped);
789 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
790 frag6_freef(V_ip6q.ip6q_next);
791 }
792 CURVNET_RESTORE();
793 }
794 IP6Q_UNLOCK();
795 VNET_LIST_RUNLOCK_NOSLEEP();
796}
797
798int
799ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
800{
801 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
802 struct mbuf *t;
803
804 /* Delete frag6 header. */
805 if (m->m_len >= offset + sizeof(struct ip6_frag)) {
806 /* This is the only possible case with !PULLDOWN_TEST. */
807 bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
808 offset);
809 m->m_data += sizeof(struct ip6_frag);
810 m->m_len -= sizeof(struct ip6_frag);
811 } else {
 812		/* No data copy is needed if the split boundary falls within a cluster. */
813 if ((t = m_split(m, offset, wait)) == NULL)
814 return (ENOMEM);
815 m_adj(t, sizeof(struct ip6_frag));
816 m_cat(m, t);
817 }
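	/*
	 * In the in-place path above, bcopy() slides the first "offset"
	 * bytes (the IPv6 header plus any unfragmentable extension
	 * headers) forward by 8 bytes so that they overwrite the Fragment
	 * header, and the m_data/m_len adjustment then makes the mbuf
	 * start at the relocated IPv6 header.  The m_split() path covers
	 * the case where the Fragment header does not fit within the
	 * first mbuf.
	 */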
818
819 return (0);
820}