/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/uipc_mbuf.c 364163 2020-08-12 12:11:44Z ae $");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/sdt.h>

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(sdt, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
	   &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
	   &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
	   &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	n->m_ext = m->m_ext;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt == 1)
		*refcnt += 1;
	else
		atomic_add_int(refcnt, 1);
}

void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
	}
}
/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 if all tests pass; otherwise it runs M_SANITY_ACTION(),
 * which panics under INVARIANTS and prints a diagnostic otherwise.
 * If "sanitize" is set, offending fields are instead repaired or
 * garbled so that they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be that this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
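
/*
 * Illustrative usage sketch (an editor's assumption, not code from this
 * file): callers normally go through the M_PREPEND() macro, which only
 * falls back to m_prepend() when the chain has no leading space.  With a
 * hypothetical "struct foo_hdr":
 *
 *	M_PREPEND(m, sizeof(struct foo_hdr), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	fh = mtod(m, struct foo_hdr *);
 *
 * On failure the original chain has already been freed, so no cleanup of
 * "m" is needed (or possible).
 */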

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf
 * chain.  The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
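
/*
 * Illustrative usage sketch (an editor's assumption, not code from this
 * file): taking a cheap reference-counted copy of a whole packet before
 * handing the original to a consumer:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		... allocation failed; "m" itself is untouched ...
 *
 * Because clusters are shared rather than copied, the result must be
 * treated as read-only; test M_WRITABLE() before modifying it.
 */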

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
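
/*
 * Illustrative usage sketch (an editor's assumption): copying a header
 * that may span mbufs into a local buffer, a common pattern in protocol
 * input paths:
 *
 *	struct ip ip;
 *
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 *
 * The caller must know the chain holds at least "off + len" bytes; the
 * routine KASSERTs instead of failing gracefully.
 */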

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}
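
/*
 * Illustrative usage sketch (an editor's assumption): in contrast to
 * m_copym(), m_dup() copies cluster contents too, so the result may be
 * modified freely:
 *
 *	n = m_dup(m, M_NOWAIT);
 *	if (n != NULL)
 *		... "n" is an independent, writable copy of the packet ...
 */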

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
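
/*
 * Illustrative usage sketch (an editor's assumption): trimming an
 * Ethernet header from the front of a packet and a CRC from the back:
 *
 *	m_adj(m, ETHER_HDR_LEN);	positive len trims from the head
 *	m_adj(m, -ETHER_CRC_LEN);	negative len trims from the tail
 *
 * m_pkthdr.len is kept in sync when the first mbuf carries M_PKTHDR.
 */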

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr - len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
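
/*
 * Illustrative usage sketch (an editor's assumption): the classic
 * header-access idiom in protocol input routines:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * Note that on failure the chain has already been freed.
 */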

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
 bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain into two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
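
/*
 * Illustrative usage sketch (an editor's assumption): fragmentation-style
 * use, splitting off everything past "mtu" payload bytes:
 *
 *	tail = m_split(m, mtu, M_NOWAIT);
 *	if (tail == NULL)
 *		... "m" is left intact; handle the failure ...
 *
 * The two halves may share clusters afterwards, so check M_WRITABLE()
 * before modifying either piece in place.
 */
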
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of the
 * target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MHLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
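
/*
 * Illustrative usage sketch (an editor's assumption): appending a small
 * payload from a local buffer, letting the chain grow as needed:
 *
 *	if (!m_append(m, sizeof(buf), (c_caddr_t)buf))
 *		... out of mbufs; data appended so far remains in "m" ...
 *
 * On success m_pkthdr.len has already been advanced by "len".
 */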

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}
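
/*
 * Illustrative usage sketch (an editor's assumption): repairing a stale
 * header length after hand-editing a chain, locating the tail in the
 * same pass:
 *
 *	m->m_pkthdr.len = m_length(m, &last);
 *
 * or simply m_fixhdr(m) when the tail pointer is not needed.
 */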

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a chain without a packet header is passed in, the
 * original chain will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
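
/*
 * Illustrative usage sketch (an editor's assumption): the common driver
 * pattern when a transmit chain has more fragments than the hardware can
 * DMA:
 *
 *	n = m_defrag(m, M_NOWAIT);
 *	if (n == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = n;
 *
 * On success the original chain has been freed; on failure it is
 * unchanged and still owned by the caller.
 */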

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	all fragments will be of one common, randomly chosen length
 * -2	each fragment's length will be entirely random
 * (Random lengths range from 1 to 255)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_first, *m_last;
	int divisor = 255, progress = 0, fraglen;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if (length == 0 || length < -2)
		return (m0);
	if (length > MCLBYTES)
		length = MCLBYTES;
	if (length < 0 && divisor > MCLBYTES)
		divisor = MCLBYTES;
	if (length == -1)
		length = 1 + (arc4random() % divisor);
	if (length > 0)
		fraglen = length;

	m_fixhdr(m0); /* Needed sanity check */

	m_first = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_first == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_first, m0, how) == 0)
		goto nospace;

	m_last = m_first;

	while (progress < m0->m_pkthdr.len) {
		if (length == -2)
			fraglen = 1 + (arc4random() % divisor);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (progress != 0) {
			struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;

			m_last->m_next = m_new;
			m_last = m_new;
		}

		m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
		progress += fraglen;
		m_last->m_len = fraglen;
	}
	m_freem(m0);
	m0 = m_first;
	return (m0);
nospace:
	if (m_first)
		m_freem(m_first);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrarily large value bounded by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero, return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
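
/*
 * Illustrative usage sketch (an editor's assumption): a socket-style send
 * path turning user data into a packet, reserving room for a link-layer
 * header:
 *
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * A "len" of zero means "take all of uio_resid"; "align" must be less
 * than MHLEN.
 */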

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of them that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
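
/*
 * Illustrative usage sketch (an editor's assumption): IPsec-style
 * preparation of a packet for in-place transforms:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * On failure the original chain has already been freed; on success every
 * mbuf in the result passes M_WRITABLE(), so encryption or compression
 * may write the data directly.
 */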

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow. */
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* enough for a 64-bit uintmax_t (20 digits) plus slack */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
	    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
	    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
#endif