/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 148552 2005-07-30 01:32:16Z sam $");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

/*
 * Malloc-type for external ext_buf ref counts.
 */
static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, short type)
{
	struct mbuf *mb, *top, *cur, *mtail;
	int num, rem;
	int i;

	KASSERT(len >= 0, ("m_getm(): len is < 0"));

	/* If m != NULL, we will append to the end of that chain. */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	else
		mtail = NULL;

	/*
	 * Calculate how many mbufs+clusters ("packets") we need and how much
	 * leftover there is after that and allocate the first mbuf+cluster
	 * if required.
	 */
	num = len / MCLBYTES;
	rem = len % MCLBYTES;
	top = cur = NULL;
	if (num > 0) {
		if ((top = cur = m_getcl(how, type, 0)) == NULL)
			goto failed;
		top->m_len = 0;
	}
	num--;

	for (i = 0; i < num; i++) {
		mb = m_getcl(how, type, 0);
		if (mb == NULL)
			goto failed;
		mb->m_len = 0;
		cur = (cur->m_next = mb);
	}
	if (rem > 0) {
		mb = (rem > MINCLSIZE) ?
		    m_getcl(how, type, 0) : m_get(how, type);
		if (mb == NULL)
			goto failed;
		mb->m_len = 0;
		if (cur == NULL)
			top = mb;
		else
			cur->m_next = mb;
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (top);
failed:
	if (top != NULL)
		m_freem(top);
	return (NULL);
}
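
/*
 * Usage sketch for m_getm() (illustrative only, not a real consumer;
 * the example_* name and the 4k length are assumptions):
 */
#if 0
static struct mbuf *
example_grow_chain(struct mbuf *chain)
{
	struct mbuf *newpart;

	/* Append roughly 4k worth of mbufs+clusters to an existing chain. */
	newpart = m_getm(chain, 4096, M_DONTWAIT, MT_DATA);
	if (newpart == NULL)
		return (NULL);		/* allocation failed; chain intact */
	return (chain != NULL ? chain : newpart);
}
#endif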

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *args, int flags, int type)
{
	u_int *ref_cnt = NULL;

	/* XXX Shouldn't be adding EXT_CLUSTER with this API */
	if (type == EXT_CLUSTER)
		ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
		    mb->m_ext.ext_buf);
	else if (type == EXT_EXTREF)
		ref_cnt = __DEVOLATILE(u_int *, mb->m_ext.ref_cnt);
	mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
	    malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
	}
}
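
/*
 * Usage sketch for m_extadd() (illustrative only; example_pool_free,
 * the caller-supplied buffer, and the use of EXT_NET_DRV are
 * assumptions, not part of any real driver):
 */
#if 0
static void
example_pool_free(void *buf, void *args)
{
	/* Return "buf" to a hypothetical driver-private pool here. */
}

static struct mbuf *
example_attach_buf(caddr_t buf, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_extadd(m, buf, size, example_pool_free, NULL, 0, EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		/* Ref count setup failed; M_EXT was never set. */
		m_free(m);
		return (NULL);
	}
	return (m);
}
#endif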

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 0.
 */
void
mb_free_ext(struct mbuf *m)
{
	u_int cnt;
	int dofree;

	/* Account for lazy ref count assignment. */
	if (m->m_ext.ref_cnt == NULL)
		dofree = 1;
	else
		dofree = 0;

	/*
	 * This is tricky.  We need to make sure to decrement the
	 * refcount in a safe way but to also clean up if we're the
	 * last reference.  This method seems to do it without a race.
	 */
	while (dofree == 0) {
		cnt = *(m->m_ext.ref_cnt);
		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
			if (cnt == 1)
				dofree = 1;
			break;
		}
	}

	if (dofree) {
		/*
		 * Do the free, should be safe.
		 */
		if (m->m_ext.ext_type == EXT_PACKET) {
			uma_zfree(zone_pack, m);
			return;
		} else if (m->m_ext.ext_type == EXT_CLUSTER) {
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			m->m_ext.ext_buf = NULL;
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_args);
			if (m->m_ext.ext_type != EXT_EXTREF) {
				if (m->m_ext.ref_cnt != NULL)
					free(__DEVOLATILE(u_int *,
					    m->m_ext.ref_cnt), M_MBUF);
				m->m_ext.ref_cnt = NULL;
			}
			m->m_ext.ext_buf = NULL;
		}
	}
	uma_zfree(zone_mbuf, m);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR. Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
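
/*
 * Usage sketch (illustrative only; callers normally go through the
 * M_PREPEND() macro, which falls back to m_prepend() when the first
 * mbuf has no leading space; the 14-byte length is an assumption):
 */
#if 0
static struct mbuf *
example_prepend_llhdr(struct mbuf *m)
{
	M_PREPEND(m, 14, M_DONTWAIT);	/* room for a link-level header */
	return (m);			/* NULL if the chain was freed */
}
#endif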

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
			n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
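
/*
 * Usage sketch for m_copym() (illustrative only; example_tap_input is
 * a hypothetical consumer of the read-only copy):
 */
#if 0
static void example_tap_input(struct mbuf *copy);

static void
example_tap_packet(struct mbuf *m)
{
	struct mbuf *copy;

	/* Cheap whole-packet copy: clusters are shared, not duplicated. */
	copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (copy != NULL)
		example_tap_input(copy);
}
#endif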

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
			n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
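
/*
 * Usage sketch for m_copydata() (illustrative only; the 20-byte buffer
 * stands in for a fixed-size protocol header):
 */
#if 0
static void
example_read_header(const struct mbuf *m)
{
	char hdr[20];

	/* Safe even when the header straddles mbuf boundaries. */
	m_copydata(m, 0, sizeof(hdr), hdr);
}
#endif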

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes of data from an mbuf chain: from the head when
 * req_len is positive, from the tail when it is negative.  The pkthdr
 * length, if present, is updated to match.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
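
/*
 * Usage sketch for m_adj() (illustrative only; the 14- and 4-byte
 * lengths stand in for an Ethernet header and trailing CRC):
 */
#if 0
static void
example_trim_frame(struct mbuf *m)
{
	m_adj(m, 14);		/* positive length: trim from the head */
	m_adj(m, -4);		/* negative length: trim from the tail */
}
#endif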

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
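
/*
 * Usage sketch showing the classic m_pullup() idiom (illustrative only;
 * "hdrlen" stands in for the size of a protocol header structure):
 */
#if 0
static struct mbuf *
example_pullup_header(struct mbuf *m, int hdrlen)
{
	/* Make the first hdrlen bytes contiguous before using mtod(). */
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);		/* chain was freed by m_pullup() */
	return (m);
}
#endif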

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
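
/*
 * Usage sketch for m_split() (illustrative only; the 536-byte record
 * boundary is an arbitrary assumption):
 */
#if 0
static struct mbuf *
example_split_record(struct mbuf *m)
{
	struct mbuf *tail;

	tail = m_split(m, 536, M_DONTWAIT);
	/*
	 * On failure "m" is left unchanged.  On success the two pieces
	 * may share a cluster, so check M_WRITABLE() before modifying.
	 */
	return (tail);
}
#endif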

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
	 void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return (NULL);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_DONTWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, mtod(m, caddr_t) + off, (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
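
/*
 * Usage sketch for m_apply() (illustrative only; a trivial byte sum
 * stands in for a real per-segment digest or checksum routine):
 */
#if 0
static int
example_sum_cb(void *arg, void *data, u_int len)
{
	u_int *sum = arg;
	u_char *p = data;

	while (len-- > 0)
		*sum += *p++;
	return (0);		/* nonzero would abort the walk early */
}

static u_int
example_sum_packet(struct mbuf *m)
{
	u_int sum = 0;

	(void)m_apply(m, 0, m->m_pkthdr.len, example_sum_cb, &sum);
	return (sum);
}
#endif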

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

/*
 * Debugging aid: print each mbuf in a chain, with up to maxlen bytes
 * of its data (maxlen == -1 means no limit).
 */
void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

/*
 * Recompute and set the packet header length of chain m0.
 */
u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

/*
 * Return the number of data bytes in chain m0 and, optionally, a
 * pointer to its last mbuf in *last.
 */
u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed-in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
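
/*
 * Usage sketch for m_defrag() (illustrative only): compact a long
 * chain, e.g. before handing it to hardware with a limited number of
 * scatter/gather segments.  Dropping the packet on failure is the
 * caller's choice, not something m_defrag() does.
 */
#if 0
static struct mbuf *
example_compact(struct mbuf *m)
{
	struct mbuf *m2;

	m2 = m_defrag(m, M_DONTWAIT);
	if (m2 == NULL) {
		m_freem(m);	/* original chain is still intact here */
		return (NULL);
	}
	return (m2);		/* original chain was freed by m_defrag() */
}
#endif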

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Copy the contents of a uio into a new mbuf chain, using clusters
 * where appropriate and leaving "align" bytes of leading space in the
 * first mbuf.  At most "len" bytes are taken from the uio when len is
 * positive; otherwise the whole uio_resid is copied.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, error = 0, length, total;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;
	if (align >= MHLEN)
		goto nospace;
	if (total + align > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);
	if (m_final == NULL)
		goto nospace;
	m_final->m_data += align;
	m_new = m_final;
	while (progress < total) {
		length = total - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;
		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}
		error = uiomove(mtod(m_new, void *), length, uio);
		if (error)
			goto nospace;
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_fixhdr(m_final);
	return (m_final);
nospace:
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
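
/*
 * Usage sketch for m_uiotombuf() (illustrative only; the 16-byte
 * alignment reserve is an assumption):
 */
#if 0
static struct mbuf *
example_uio_to_pkt(struct uio *uio)
{
	/*
	 * len <= 0 means "take all of uio_resid"; align leaves leading
	 * space in the first mbuf for headers prepended later.
	 */
	return (m_uiotombuf(uio, M_DONTWAIT, 0, 16));
}
#endif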

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust & ~(sizeof(long) - 1);
}