/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

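/*
 * Each bucket carries its own mutex, so fragments hashing to different
 * buckets can be processed in parallel and pressure on one bucket does
 * not stall the others.
 */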
struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq * ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items. (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))
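/*
 * Illustrative arithmetic: with nmbclusters = 262144, IP_MAXFRAGS is
 * 8192, which is below IPREASS_NHASH * 50 (51200), so the fragment
 * limit also caps the number of reassembly queue entries.
 */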

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function returns NULL and stores the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * is reassembled and a pointer to the new mbuf is returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
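/*
 * M_PROTO9 is borrowed as a per-fragment flag recording that IP_MF was
 * set, since ip_off is rewritten below and the flag bits are lost.
 */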
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ifnet *srcifp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef	RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0, never
	 * accept fragments.  Also drop the packet if it would push us
	 * over the global limit on the number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
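	/*
	 * Convert the fragment offset to bytes; the left shift by 3
	 * also pushes the RF/DF/MF flag bits out of the 16-bit field.
	 */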
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Store receive network interface pointer for later.
	 */
	srcifp = m->m_pkthdr.rcvif;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

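	/*
	 * The bucket is chosen by hashing the tuple that identifies a
	 * fragmented datagram: source, destination, protocol and IP ID,
	 * keyed with a per-VNET random seed so remote hosts cannot aim
	 * their fragments at a particular bucket.
	 */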
	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
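		/*
		 * ipq_maxoff is -1 while the final fragment has not been
		 * seen; once it arrives, it records the datagram's total
		 * payload length.
		 */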
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane. Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this fragment with the first one;
	 * if CE is set anywhere, do not lose it.
	 * Drop the fragment if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the incoming
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last fragment didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
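	/*
	 * While chaining the fragments together, merge the hardware
	 * checksum state: a csum flag stays valid only if it is set on
	 * every fragment, and the 16-bit partial sums simply add up.
	 */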
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in the for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet;  dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) {	/* XXX this should be done elsewhere */
		m_fixhdr(m);
		/* set valid receive interface pointer */
		m->m_pkthdr.rcvif = srcifp;
	}
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef	RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
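		/*
		 * MTX_DUPOK: ipq_reuse() can hold two bucket locks of
		 * this class at once while stealing a queue from a
		 * neighbouring bucket.
		 */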
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

/*
 * Clear the receive interface pointer on all datagram fragments
 * received from the given network interface, which is departing.
 */
static void
ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ipq *fp, *temp;
	struct mbuf *m;
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);

	/*
	 * Skip processing if IPv4 reassembly is not initialized or has
	 * been torn down by ipreass_destroy().
	 */
	if (V_ipq_zone == NULL) {
		CURVNET_RESTORE();
		return;
	}

	for (i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		/* Scan fragment list. */
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
			for (m = fp->ipq_frags; m != NULL; m = m->m_nextpkt) {
				/* Clear the no longer valid rcvif pointer. */
				if (m->m_pkthdr.rcvif == ifp)
					m->m_pkthdr.rcvif = NULL;
			}
		}
		IPQ_UNLOCK(i);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, ipreass_cleanup, NULL, 0);

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	V_ipq_zone = NULL;
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After the reassembly limits have been updated, propagate the change
 * to UMA.  The UMA zone max has slightly different semantics than the
 * sysctl, for historical reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit. If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment reassembly
 * altogether.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
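/*
 * Illustrative userland usage (the OID resolves to
 * net.inet.ip.maxfragpackets):
 *   sysctl net.inet.ip.maxfragpackets=1024  # cap reassembly queues
 *   sysctl net.inet.ip.maxfragpackets=0     # disable reassembly
 *   sysctl net.inet.ip.maxfragpackets=-1    # no limit
 */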
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Seek an old fragment queue header that can be reused.  Try the
 * currently locked hash bucket first; failing that, scan the other
 * buckets.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
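		/*
		 * Trylock the other buckets so that two threads, each
		 * already holding its own bucket lock, cannot deadlock
		 * against each other here.
		 */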
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}