uipc_mbuf.c revision 166171
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 166171 2007-01-22 14:50:28Z andre $");
34
35#include "opt_mac.h"
36#include "opt_param.h"
37#include "opt_mbuf_stress_test.h"
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/limits.h>
43#include <sys/lock.h>
44#include <sys/malloc.h>
45#include <sys/mbuf.h>
46#include <sys/sysctl.h>
47#include <sys/domain.h>
48#include <sys/protosw.h>
49#include <sys/uio.h>
50
51#include <security/mac/mac_framework.h>
52
53int	max_linkhdr;
54int	max_protohdr;
55int	max_hdr;
56int	max_datalen;
57#ifdef MBUF_STRESS_TEST
58int	m_defragpackets;
59int	m_defragbytes;
60int	m_defraguseless;
61int	m_defragfailure;
62int	m_defragrandomfailures;
63#endif
64
65/*
66 * sysctl(8) exported objects
67 */
68SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
69	   &max_linkhdr, 0, "Size of largest link layer header");
70SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
71	   &max_protohdr, 0, "Size of largest protocol layer header");
72SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
73	   &max_hdr, 0, "Size of largest link plus protocol header");
74SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
75	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
76#ifdef MBUF_STRESS_TEST
77SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
78	   &m_defragpackets, 0, "Number of packets defragmented");
79SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
80	   &m_defragbytes, 0, "Total bytes in defragmented packets");
81SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
82	   &m_defraguseless, 0, "Number of m_defrag calls that were not needed");
83SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
84	   &m_defragfailure, 0, "Number of failed m_defrag calls");
85SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
86	   &m_defragrandomfailures, 0, "Inject random m_defrag failures");
87#endif
88
89/*
90 * Allocate a given length worth of mbufs and/or clusters (whatever fits
91 * best) and return a pointer to the top of the allocated chain.  If an
92 * existing mbuf chain is provided, then we will append the new chain
93 * to the existing one but still return the top of the newly allocated
94 * chain.
95 */
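/*
 * For example, m_uiotombuf() below obtains a packet header chain large
 * enough for "total + align" bytes of uio data with:
 *
 *	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
 */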
96struct mbuf *
97m_getm2(struct mbuf *m, int len, int how, short type, int flags)
98{
99	struct mbuf *mb, *nm = NULL, *mtail = NULL;
100
101	KASSERT(len >= 0, ("%s: len is < 0", __func__));
102
103	/* Validate flags. */
104	flags &= (M_PKTHDR | M_EOR);
105
106	/* Packet header mbuf must be first in chain. */
107	if ((flags & M_PKTHDR) && m != NULL)
108		flags &= ~M_PKTHDR;
109
110	/* Loop and append maximum sized mbufs to the chain tail. */
111	while (len > 0) {
112		if (len > MCLBYTES)
113			mb = m_getjcl(how, type, (flags & M_PKTHDR),
114			    MJUMPAGESIZE);
115		else if (len >= MINCLSIZE)
116			mb = m_getcl(how, type, (flags & M_PKTHDR));
117		else if (flags & M_PKTHDR)
118			mb = m_gethdr(how, type);
119		else
120			mb = m_get(how, type);
121
122		/* Fail the whole operation if one mbuf can't be allocated. */
123		if (mb == NULL) {
124			if (nm != NULL)
125				m_freem(nm);
126			return (NULL);
127		}
128
129		/* Bookkeeping. */
130		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
131			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
132		if (mtail != NULL)
133			mtail->m_next = mb;
134		else
135			nm = mb;
136		mtail = mb;
137		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
138	}
139	if (flags & M_EOR)
140		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
141
142	/* If mbuf was supplied, append new chain to the end of it. */
143	if (m != NULL) {
144		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
145			;
146		mtail->m_next = nm;
147		mtail->m_flags &= ~M_EOR;
148	} else
149		m = nm;
150
151	return (m);
152}
153
154/*
155 * Free an entire chain of mbufs and associated external buffers, if
156 * applicable.
157 */
158void
159m_freem(struct mbuf *mb)
160{
161
162	while (mb != NULL)
163		mb = m_free(mb);
164}
165
166/*-
167 * Configure a provided mbuf to refer to the provided external storage
168 * buffer and set up a reference count for said buffer.  If setting up
169 * the reference count fails, the M_EXT bit will not be set.  If
170 * successful, the M_EXT bit is set in the mbuf's flags.
171 *
172 * Arguments:
173 *    mb     The existing mbuf to which to attach the provided buffer.
174 *    buf    The address of the provided external storage buffer.
175 *    size   The size of the provided buffer.
176 *    freef  A pointer to a routine that is responsible for freeing the
177 *           provided external storage buffer.
178 *    args   A pointer to an argument structure (of any type) to be passed
179 *           to the provided freef routine (may be NULL).
180 *    flags  Any other flags to be passed to the provided mbuf.
181 *    type   The type that the external storage buffer should be
182 *           labeled with.
183 *
184 * Returns:
185 *    Nothing.
186 */
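/*
 * For example, a driver attaching a buffer from its own pool might call
 * (my_free() and my_arg standing for the driver's own free routine and
 * its argument):
 *
 *	m_extadd(m, buf, size, my_free, my_arg, 0, EXT_NET_DRV);
 */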
187void
188m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
189    void (*freef)(void *, void *), void *args, int flags, int type)
190{
191	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
192
193	if (type != EXT_EXTREF)
194		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
195	if (mb->m_ext.ref_cnt != NULL) {
196		*(mb->m_ext.ref_cnt) = 1;
197		mb->m_flags |= (M_EXT | flags);
198		mb->m_ext.ext_buf = buf;
199		mb->m_data = mb->m_ext.ext_buf;
200		mb->m_ext.ext_size = size;
201		mb->m_ext.ext_free = freef;
202		mb->m_ext.ext_args = args;
203		mb->m_ext.ext_type = type;
204	}
205}
206
207/*
208 * Non-directly-exported function to clean up after mbufs with M_EXT
209 * storage attached to them when the last reference is dropped.
210 */
211void
212mb_free_ext(struct mbuf *m)
213{
214	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
215	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
216
217	/* Free attached storage if this mbuf is the only reference to it. */
218	if (*(m->m_ext.ref_cnt) == 1 ||
219	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
220		switch (m->m_ext.ext_type) {
221		case EXT_PACKET:	/* The packet zone is special. */
222			if (*(m->m_ext.ref_cnt) == 0)
223				*(m->m_ext.ref_cnt) = 1;
224			uma_zfree(zone_pack, m);
225			return;		/* Job done. */
226		case EXT_CLUSTER:
227			uma_zfree(zone_clust, m->m_ext.ext_buf);
228			break;
229		case EXT_JUMBOP:
230			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
231			break;
232		case EXT_JUMBO9:
233			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
234			break;
235		case EXT_JUMBO16:
236			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
237			break;
238		case EXT_SFBUF:
239		case EXT_NET_DRV:
240		case EXT_MOD_TYPE:
241		case EXT_DISPOSABLE:
242			*(m->m_ext.ref_cnt) = 0;
243			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
244				m->m_ext.ref_cnt));
245			/* FALLTHROUGH */
246		case EXT_EXTREF:
247			KASSERT(m->m_ext.ext_free != NULL,
248				("%s: ext_free not set", __func__));
249			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
250			    m->m_ext.ext_args);
251			break;
252		default:
253			KASSERT(m->m_ext.ext_type == 0,
254				("%s: unknown ext_type", __func__));
255		}
256	}
257	/*
258	 * Free this mbuf back to the mbuf zone with all m_ext
259	 * information purged.
260	 */
261	m->m_ext.ext_buf = NULL;
262	m->m_ext.ext_free = NULL;
263	m->m_ext.ext_args = NULL;
264	m->m_ext.ref_cnt = NULL;
265	m->m_ext.ext_size = 0;
266	m->m_ext.ext_type = 0;
267	m->m_flags &= ~M_EXT;
268	uma_zfree(zone_mbuf, m);
269}
270
271/*
272 * Attach the cluster from *m to *n, set up m_ext in *n
273 * and bump the refcount of the cluster.
274 */
275static void
276mb_dupcl(struct mbuf *n, struct mbuf *m)
277{
278	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
279	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
280	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
281
282	if (*(m->m_ext.ref_cnt) == 1)
283		*(m->m_ext.ref_cnt) += 1;
284	else
285		atomic_add_int(m->m_ext.ref_cnt, 1);
286	n->m_ext.ext_buf = m->m_ext.ext_buf;
287	n->m_ext.ext_free = m->m_ext.ext_free;
288	n->m_ext.ext_args = m->m_ext.ext_args;
289	n->m_ext.ext_size = m->m_ext.ext_size;
290	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
291	n->m_ext.ext_type = m->m_ext.ext_type;
292	n->m_flags |= M_EXT;
293}
294
295/*
296 * Clean up mbuf (chain) from any tags and packet headers.
297 * If "all" is set then the first mbuf in the chain will be
298 * cleaned too.
299 */
300void
301m_demote(struct mbuf *m0, int all)
302{
303	struct mbuf *m;
304
305	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
306		if (m->m_flags & M_PKTHDR) {
307			m_tag_delete_chain(m, NULL);
308			m->m_flags &= ~M_PKTHDR;
309			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
310		}
311		if (m->m_type == MT_HEADER)
312			m->m_type = MT_DATA;
313		if (m != m0 && m->m_nextpkt != NULL)
314			m->m_nextpkt = NULL;
315		m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
316	}
317}
318
319/*
320 * Sanity checks on mbuf (chain) for use in KASSERT() and general
321 * debugging.
322 * Returns 0 (or panics, per M_SANITY_ACTION) when bad, 1 when all tests pass.
323 * sanitize: 0 to run M_SANITY_ACTION on errors, 1 to garble the offending
324 * fields so that later misuse blows up.
325 */
326int
327m_sanity(struct mbuf *m0, int sanitize)
328{
329	struct mbuf *m;
330	caddr_t a, b;
331	int pktlen = 0;
332
333#define	M_SANITY_ACTION(s)	return (0)
334/* #define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m) */
335
336	for (m = m0; m != NULL; m = m->m_next) {
337		/*
338		 * Basic pointer checks.  If any of these fails then some
339		 * unrelated kernel memory before or after us is trashed.
340		 * No way to recover from that.
341		 */
342		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
343			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
344			 (caddr_t)(&m->m_dat)) );
345		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
346			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
347		if ((caddr_t)m->m_data < a)
348			M_SANITY_ACTION("m_data outside mbuf data range left");
349		if ((caddr_t)m->m_data > b)
350			M_SANITY_ACTION("m_data outside mbuf data range right");
351		if ((caddr_t)m->m_data + m->m_len > b)
352			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
353		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
354			if ((caddr_t)m->m_pkthdr.header < a ||
355			    (caddr_t)m->m_pkthdr.header > b)
356				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
357		}
358
359		/* m->m_nextpkt may only be set on first mbuf in chain. */
360		if (m != m0 && m->m_nextpkt != NULL) {
361			if (sanitize) {
362				m_freem(m->m_nextpkt);
363				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
364			} else
365				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
366		}
367
368		/* correct type correlations. */
369		if (m->m_type == MT_HEADER && !(m->m_flags & M_PKTHDR)) {
370			if (sanitize)
371				m->m_type = MT_DATA;
372			else
373				M_SANITY_ACTION("MT_HEADER set but not M_PKTHDR");
374		}
375
376		/* packet length (not mbuf length!) calculation */
377		if (m0->m_flags & M_PKTHDR)
378			pktlen += m->m_len;
379
380		/* m_tags may only be attached to first mbuf in chain. */
381		if (m != m0 && m->m_flags & M_PKTHDR &&
382		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
383			if (sanitize) {
384				m_tag_delete_chain(m, NULL);
385				/* put in 0xDEADC0DE perhaps? */
386			} else
387				M_SANITY_ACTION("m_tags on in-chain mbuf");
388		}
389
390		/* M_PKTHDR may only be set on first mbuf in chain */
391		if (m != m0 && m->m_flags & M_PKTHDR) {
392			if (sanitize) {
393				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
394				m->m_flags &= ~M_PKTHDR;
395				/* put in 0xDEADC0DE and leave hdr flag in */
396			} else
397				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
398		}
399	}
400	m = m0;
401	if (pktlen && pktlen != m->m_pkthdr.len) {
402		if (sanitize)
403			m->m_pkthdr.len = 0;
404		else
405			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
406	}
407	return 1;
408
409#undef	M_SANITY_ACTION
410}
411
412
413/*
414 * "Move" mbuf pkthdr from "from" to "to".
415 * "from" must have M_PKTHDR set, and "to" must be empty.
416 */
417void
418m_move_pkthdr(struct mbuf *to, struct mbuf *from)
419{
420
421#if 0
422	/* see below for why these are not enabled */
423	M_ASSERTPKTHDR(to);
424	/* Note: with MAC, this may not be a good assertion. */
425	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
426	    ("m_move_pkthdr: to has tags"));
427#endif
428#ifdef MAC
429	/*
430	 * XXXMAC: It could be this should also occur for non-MAC?
431	 */
432	if (to->m_flags & M_PKTHDR)
433		m_tag_delete_chain(to, NULL);
434#endif
435	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
436	if ((to->m_flags & M_EXT) == 0)
437		to->m_data = to->m_pktdat;
438	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
439	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
440	from->m_flags &= ~M_PKTHDR;
441}
442
443/*
444 * Duplicate "from"'s mbuf pkthdr in "to".
445 * "from" must have M_PKTHDR set, and "to" must be empty.
446 * In particular, this does a deep copy of the packet tags.
447 */
448int
449m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
450{
451
452#if 0
453	/*
454	 * The mbuf allocator only initializes the pkthdr
455	 * when the mbuf is allocated with MGETHDR. Many users
456	 * (e.g. m_copy*, m_prepend) use MGET and then
457	 * smash the pkthdr as needed causing these
458	 * assertions to trip.  For now just disable them.
459	 */
460	M_ASSERTPKTHDR(to);
461	/* Note: with MAC, this may not be a good assertion. */
462	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
463#endif
464	MBUF_CHECKSLEEP(how);
465#ifdef MAC
466	if (to->m_flags & M_PKTHDR)
467		m_tag_delete_chain(to, NULL);
468#endif
469	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
470	if ((to->m_flags & M_EXT) == 0)
471		to->m_data = to->m_pktdat;
472	to->m_pkthdr = from->m_pkthdr;
473	SLIST_INIT(&to->m_pkthdr.tags);
474	return (m_tag_copy_chain(to, from, MBTOM(how)));
475}
476
477/*
478 * Lesser-used path for M_PREPEND:
479 * allocate new mbuf to prepend to chain,
480 * copy junk along.
481 */
482struct mbuf *
483m_prepend(struct mbuf *m, int len, int how)
484{
485	struct mbuf *mn;
486
487	if (m->m_flags & M_PKTHDR)
488		MGETHDR(mn, how, m->m_type);
489	else
490		MGET(mn, how, m->m_type);
491	if (mn == NULL) {
492		m_freem(m);
493		return (NULL);
494	}
495	if (m->m_flags & M_PKTHDR)
496		M_MOVE_PKTHDR(mn, m);
497	mn->m_next = m;
498	m = mn;
499	if (m->m_flags & M_PKTHDR) {
500		if (len < MHLEN)
501			MH_ALIGN(m, len);
502	} else {
503		if (len < MLEN)
504			M_ALIGN(m, len);
505	}
506	m->m_len = len;
507	return (m);
508}
509
510/*
511 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
512 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of chain.
513 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
514 * Note that the copy is read-only, because clusters are not copied,
515 * only their reference counts are incremented.
516 */
517struct mbuf *
518m_copym(struct mbuf *m, int off0, int len, int wait)
519{
520	struct mbuf *n, **np;
521	int off = off0;
522	struct mbuf *top;
523	int copyhdr = 0;
524
525	KASSERT(off >= 0, ("m_copym, negative off %d", off));
526	KASSERT(len >= 0, ("m_copym, negative len %d", len));
527	MBUF_CHECKSLEEP(wait);
528	if (off == 0 && m->m_flags & M_PKTHDR)
529		copyhdr = 1;
530	while (off > 0) {
531		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
532		if (off < m->m_len)
533			break;
534		off -= m->m_len;
535		m = m->m_next;
536	}
537	np = &top;
538	top = NULL;
539	while (len > 0) {
540		if (m == NULL) {
541			KASSERT(len == M_COPYALL,
542			    ("m_copym, length > size of mbuf chain"));
543			break;
544		}
545		if (copyhdr)
546			MGETHDR(n, wait, m->m_type);
547		else
548			MGET(n, wait, m->m_type);
549		*np = n;
550		if (n == NULL)
551			goto nospace;
552		if (copyhdr) {
553			if (!m_dup_pkthdr(n, m, wait))
554				goto nospace;
555			if (len == M_COPYALL)
556				n->m_pkthdr.len -= off0;
557			else
558				n->m_pkthdr.len = len;
559			copyhdr = 0;
560		}
561		n->m_len = min(len, m->m_len - off);
562		if (m->m_flags & M_EXT) {
563			n->m_data = m->m_data + off;
564			mb_dupcl(n, m);
565		} else
566			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
567			    (u_int)n->m_len);
568		if (len != M_COPYALL)
569			len -= n->m_len;
570		off = 0;
571		m = m->m_next;
572		np = &n->m_next;
573	}
574	if (top == NULL)
575		mbstat.m_mcfail++;	/* XXX: No consistency. */
576
577	return (top);
578nospace:
579	m_freem(top);
580	mbstat.m_mcfail++;	/* XXX: No consistency. */
581	return (NULL);
582}
583
584/*
585 * Returns mbuf chain with new head for the prepending case.
586 * Copies from mbuf (chain) n from off for len to mbuf (chain) m
587 * either prepending or appending the data.
588 * The resulting mbuf (chain) m is fully writeable.
589 * m is destination (is made writeable)
590 * n is source, off is offset in source, len is len from offset
591 * prep, 0 append, 1 prepend
592 * how, wait or nowait
593 */
594
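/* bcopy() wrapper with the callback signature expected by m_apply(). */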
595static int
596m_bcopyxxx(void *s, void *t, u_int len)
597{
598	bcopy(s, t, (size_t)len);
599	return 0;
600}
601
602struct mbuf *
603m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
604    int prep, int how)
605{
606	struct mbuf *mm, *x, *z, *prev = NULL;
607	caddr_t p;
608	int i, nlen = 0;
609	char buf[MLEN];
610
611	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
612	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
613	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
614	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));
615
616	mm = m;
617	if (!prep) {
618		while(mm->m_next) {
619			prev = mm;
620			mm = mm->m_next;
621		}
622	}
623	for (z = n; z != NULL; z = z->m_next)
624		nlen += z->m_len;
625	if (len == M_COPYALL)
626		len = nlen - off;
627	if (off + len > nlen || len < 1)
628		return NULL;
629
630	if (!M_WRITABLE(mm)) {
631		/* XXX: Use proper m_xxx function instead. */
632		x = m_getcl(how, MT_DATA, mm->m_flags);
633		if (x == NULL)
634			return NULL;
635		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
636		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
637		x->m_data = p;
638		mm->m_next = NULL;
639		if (mm != m)
640			prev->m_next = x;
641		m_free(mm);
642		mm = x;
643	}
644
645	/*
646	 * Append/prepend the data, allocating mbufs as necessary.
647	 */
648	/* Shortcut if enough free space in first/last mbuf. */
649	if (!prep && M_TRAILINGSPACE(mm) >= len) {
650		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
651			 mm->m_len);
652		mm->m_len += len;
653		mm->m_pkthdr.len += len;
654		return m;
655	}
656	if (prep && M_LEADINGSPACE(mm) >= len) {
657		mm->m_data = mtod(mm, caddr_t) - len;
658		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
659		mm->m_len += len;
660		mm->m_pkthdr.len += len;
661		return mm;
662	}
663
664	/* Expand first/last mbuf to cluster if possible. */
665	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
666		bcopy(mm->m_data, &buf, mm->m_len);
667		m_clget(mm, how);
668		if (!(mm->m_flags & M_EXT))
669			return NULL;
670		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
671		mm->m_data = mm->m_ext.ext_buf;
672		mm->m_pkthdr.header = NULL;
673	}
674	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
675		bcopy(mm->m_data, &buf, mm->m_len);
676		m_clget(mm, how);
677		if (!(mm->m_flags & M_EXT))
678			return NULL;
679		bcopy(&buf, (caddr_t)mm->m_ext.ext_buf +
680		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
681		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
682			      mm->m_ext.ext_size - mm->m_len;
683		mm->m_pkthdr.header = NULL;
684	}
685
686	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
687	if (!prep && len > M_TRAILINGSPACE(mm)) {
688		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
689			return NULL;
690	}
691	if (prep && len > M_LEADINGSPACE(mm)) {
692		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
693			return NULL;
694		i = 0;
695		for (x = z; x != NULL; x = x->m_next) {
696			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
697			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
698			if (!x->m_next)
699				break;
700		}
701		z->m_data += i - len;
702		m_move_pkthdr(mm, z);
703		x->m_next = mm;
704		mm = z;
705	}
706
707	/* Seek to start position in source mbuf. Optimization for long chains. */
708	while (off > 0) {
709		if (off < n->m_len)
710			break;
711		off -= n->m_len;
712		n = n->m_next;
713	}
714
715	/* Copy data into target mbuf. */
716	z = mm;
717	while (len > 0) {
718		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
719		i = M_TRAILINGSPACE(z);
720		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
721		z->m_len += i;
722		/* fixup pkthdr.len if necessary */
723		if ((prep ? mm : m)->m_flags & M_PKTHDR)
724			(prep ? mm : m)->m_pkthdr.len += i;
725		off += i;
726		len -= i;
727		z = z->m_next;
728	}
729	return (prep ? mm : m);
730}
731
732/*
733 * Copy an entire packet, including header (which must be present).
734 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
735 * Note that the copy is read-only, because clusters are not copied,
736 * only their reference counts are incremented.
737 * Preserve alignment of the first mbuf so if the creator has left
738 * some room at the beginning (e.g. for inserting protocol headers)
739 * the copies still have the room available.
740 */
741struct mbuf *
742m_copypacket(struct mbuf *m, int how)
743{
744	struct mbuf *top, *n, *o;
745
746	MBUF_CHECKSLEEP(how);
747	MGET(n, how, m->m_type);
748	top = n;
749	if (n == NULL)
750		goto nospace;
751
752	if (!m_dup_pkthdr(n, m, how))
753		goto nospace;
754	n->m_len = m->m_len;
755	if (m->m_flags & M_EXT) {
756		n->m_data = m->m_data;
757		mb_dupcl(n, m);
758	} else {
759		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
760		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
761	}
762
763	m = m->m_next;
764	while (m) {
765		MGET(o, how, m->m_type);
766		if (o == NULL)
767			goto nospace;
768
769		n->m_next = o;
770		n = n->m_next;
771
772		n->m_len = m->m_len;
773		if (m->m_flags & M_EXT) {
774			n->m_data = m->m_data;
775			mb_dupcl(n, m);
776		} else {
777			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
778		}
779
780		m = m->m_next;
781	}
782	return top;
783nospace:
784	m_freem(top);
785	mbstat.m_mcfail++;	/* XXX: No consistency. */
786	return (NULL);
787}
788
789/*
790 * Copy data from an mbuf chain starting "off" bytes from the beginning,
791 * continuing for "len" bytes, into the indicated buffer.
792 */
793void
794m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
795{
796	u_int count;
797
798	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
799	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
800	while (off > 0) {
801		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
802		if (off < m->m_len)
803			break;
804		off -= m->m_len;
805		m = m->m_next;
806	}
807	while (len > 0) {
808		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
809		count = min(m->m_len - off, len);
810		bcopy(mtod(m, caddr_t) + off, cp, count);
811		len -= count;
812		cp += count;
813		off = 0;
814		m = m->m_next;
815	}
816}
817
818/*
819 * Copy a packet header mbuf chain into a completely new chain, including
820 * copying any mbuf clusters.  Use this instead of m_copypacket() when
821 * you need a writable copy of an mbuf chain.
822 */
823struct mbuf *
824m_dup(struct mbuf *m, int how)
825{
826	struct mbuf **p, *top = NULL;
827	int remain, moff, nsize;
828
829	MBUF_CHECKSLEEP(how);
830	/* Sanity check */
831	if (m == NULL)
832		return (NULL);
833	M_ASSERTPKTHDR(m);
834
835	/* While there's more data, get a new mbuf, tack it on, and fill it */
836	remain = m->m_pkthdr.len;
837	moff = 0;
838	p = &top;
839	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
840		struct mbuf *n;
841
842		/* Get the next new mbuf */
843		if (remain >= MINCLSIZE) {
844			n = m_getcl(how, m->m_type, 0);
845			nsize = MCLBYTES;
846		} else {
847			n = m_get(how, m->m_type);
848			nsize = MLEN;
849		}
850		if (n == NULL)
851			goto nospace;
852
853		if (top == NULL) {		/* First one, must be PKTHDR */
854			if (!m_dup_pkthdr(n, m, how)) {
855				m_free(n);
856				goto nospace;
857			}
858			if ((n->m_flags & M_EXT) == 0)
859				nsize = MHLEN;
860		}
861		n->m_len = 0;
862
863		/* Link it into the new chain */
864		*p = n;
865		p = &n->m_next;
866
867		/* Copy data from original mbuf(s) into new mbuf */
868		while (n->m_len < nsize && m != NULL) {
869			int chunk = min(nsize - n->m_len, m->m_len - moff);
870
871			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
872			moff += chunk;
873			n->m_len += chunk;
874			remain -= chunk;
875			if (moff == m->m_len) {
876				m = m->m_next;
877				moff = 0;
878			}
879		}
880
881		/* Check correct total mbuf length */
882		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
883		    	("%s: bogus m_pkthdr.len", __func__));
884	}
885	return (top);
886
887nospace:
888	m_freem(top);
889	mbstat.m_mcfail++;	/* XXX: No consistency. */
890	return (NULL);
891}
892
893/*
894 * Concatenate mbuf chain n to m.
895 * Both chains must be of the same type (e.g. MT_DATA).
896 * Any m_pkthdr is not updated.
897 */
898void
899m_cat(struct mbuf *m, struct mbuf *n)
900{
901	while (m->m_next)
902		m = m->m_next;
903	while (n) {
904		if (m->m_flags & M_EXT ||
905		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
906			/* just join the two chains */
907			m->m_next = n;
908			return;
909		}
910		/* splat the data from one into the other */
911		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
912		    (u_int)n->m_len);
913		m->m_len += n->m_len;
914		n = m_free(n);
915	}
916}
917
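/*
 * Trim req_len bytes from the mbuf chain: from the head if req_len is
 * positive, from the tail if it is negative.  The packet header length,
 * if present, is adjusted accordingly.
 */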
918void
919m_adj(struct mbuf *mp, int req_len)
920{
921	int len = req_len;
922	struct mbuf *m;
923	int count;
924
925	if ((m = mp) == NULL)
926		return;
927	if (len >= 0) {
928		/*
929		 * Trim from head.
930		 */
931		while (m != NULL && len > 0) {
932			if (m->m_len <= len) {
933				len -= m->m_len;
934				m->m_len = 0;
935				m = m->m_next;
936			} else {
937				m->m_len -= len;
938				m->m_data += len;
939				len = 0;
940			}
941		}
942		m = mp;
943		if (mp->m_flags & M_PKTHDR)
944			m->m_pkthdr.len -= (req_len - len);
945	} else {
946		/*
947		 * Trim from tail.  Scan the mbuf chain,
948		 * calculating its length and finding the last mbuf.
949		 * If the adjustment only affects this mbuf, then just
950		 * adjust and return.  Otherwise, rescan and truncate
951		 * after the remaining size.
952		 */
953		len = -len;
954		count = 0;
955		for (;;) {
956			count += m->m_len;
957			if (m->m_next == (struct mbuf *)0)
958				break;
959			m = m->m_next;
960		}
961		if (m->m_len >= len) {
962			m->m_len -= len;
963			if (mp->m_flags & M_PKTHDR)
964				mp->m_pkthdr.len -= len;
965			return;
966		}
967		count -= len;
968		if (count < 0)
969			count = 0;
970		/*
971		 * Correct length for chain is "count".
972		 * Find the mbuf with last data, adjust its length,
973		 * and toss data from remaining mbufs on chain.
974		 */
975		m = mp;
976		if (m->m_flags & M_PKTHDR)
977			m->m_pkthdr.len = count;
978		for (; m; m = m->m_next) {
979			if (m->m_len >= count) {
980				m->m_len = count;
981				if (m->m_next != NULL) {
982					m_freem(m->m_next);
983					m->m_next = NULL;
984				}
985				break;
986			}
987			count -= m->m_len;
988		}
989	}
990}
991
992/*
993 * Rearrange an mbuf chain so that len bytes are contiguous
994 * and in the data area of an mbuf (so that mtod and dtom
995 * will work for a structure of size len).  Returns the resulting
996 * mbuf chain on success, frees it and returns null on failure.
997 * If there is room, it will add up to max_protohdr-len extra bytes to the
998 * contiguous region in an attempt to avoid being called next time.
999 */
1000struct mbuf *
1001m_pullup(struct mbuf *n, int len)
1002{
1003	struct mbuf *m;
1004	int count;
1005	int space;
1006
1007	/*
1008	 * If first mbuf has no cluster, and has room for len bytes
1009	 * without shifting current data, pullup into it,
1010	 * otherwise allocate a new mbuf to prepend to the chain.
1011	 */
1012	if ((n->m_flags & M_EXT) == 0 &&
1013	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1014		if (n->m_len >= len)
1015			return (n);
1016		m = n;
1017		n = n->m_next;
1018		len -= m->m_len;
1019	} else {
1020		if (len > MHLEN)
1021			goto bad;
1022		MGET(m, M_DONTWAIT, n->m_type);
1023		if (m == NULL)
1024			goto bad;
1025		m->m_len = 0;
1026		if (n->m_flags & M_PKTHDR)
1027			M_MOVE_PKTHDR(m, n);
1028	}
1029	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1030	do {
1031		count = min(min(max(len, max_protohdr), space), n->m_len);
1032		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1033		  (u_int)count);
1034		len -= count;
1035		m->m_len += count;
1036		n->m_len -= count;
1037		space -= count;
1038		if (n->m_len)
1039			n->m_data += count;
1040		else
1041			n = m_free(n);
1042	} while (len > 0 && n);
1043	if (len > 0) {
1044		(void) m_free(m);
1045		goto bad;
1046	}
1047	m->m_next = n;
1048	return (m);
1049bad:
1050	m_freem(n);
1051	mbstat.m_mpfail++;	/* XXX: No consistency. */
1052	return (NULL);
1053}
1054
1055/*
1056 * Like m_pullup(), except a new mbuf is always allocated, and we allow
1057 * the amount of empty space before the data in the new mbuf to be specified
1058 * (in the event that the caller expects to prepend later).
1059 */
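/* Failure counter for m_copyup(). */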
1060int MSFail;
1061
1062struct mbuf *
1063m_copyup(struct mbuf *n, int len, int dstoff)
1064{
1065	struct mbuf *m;
1066	int count, space;
1067
1068	if (len > (MHLEN - dstoff))
1069		goto bad;
1070	MGET(m, M_DONTWAIT, n->m_type);
1071	if (m == NULL)
1072		goto bad;
1073	m->m_len = 0;
1074	if (n->m_flags & M_PKTHDR)
1075		M_MOVE_PKTHDR(m, n);
1076	m->m_data += dstoff;
1077	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1078	do {
1079		count = min(min(max(len, max_protohdr), space), n->m_len);
1080		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
1081		    (unsigned)count);
1082		len -= count;
1083		m->m_len += count;
1084		n->m_len -= count;
1085		space -= count;
1086		if (n->m_len)
1087			n->m_data += count;
1088		else
1089			n = m_free(n);
1090	} while (len > 0 && n);
1091	if (len > 0) {
1092		(void) m_free(m);
1093		goto bad;
1094	}
1095	m->m_next = n;
1096	return (m);
1097 bad:
1098	m_freem(n);
1099	MSFail++;
1100	return (NULL);
1101}
1102
1103/*
1104 * Partition an mbuf chain in two pieces, returning the tail --
1105 * all but the first len0 bytes.  In case of failure, it returns NULL and
1106 * attempts to restore the chain to its original state.
1107 *
1108 * Note that the resulting mbufs might be read-only, because the new
1109 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1110 * the "breaking point" happens to lie within a cluster mbuf. Use the
1111 * M_WRITABLE() macro to check for this case.
1112 */
1113struct mbuf *
1114m_split(struct mbuf *m0, int len0, int wait)
1115{
1116	struct mbuf *m, *n;
1117	u_int len = len0, remain;
1118
1119	MBUF_CHECKSLEEP(wait);
1120	for (m = m0; m && len > m->m_len; m = m->m_next)
1121		len -= m->m_len;
1122	if (m == NULL)
1123		return (NULL);
1124	remain = m->m_len - len;
1125	if (m0->m_flags & M_PKTHDR) {
1126		MGETHDR(n, wait, m0->m_type);
1127		if (n == NULL)
1128			return (NULL);
1129		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1130		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1131		m0->m_pkthdr.len = len0;
1132		if (m->m_flags & M_EXT)
1133			goto extpacket;
1134		if (remain > MHLEN) {
1135			/* m can't be the lead packet */
1136			MH_ALIGN(n, 0);
1137			n->m_next = m_split(m, len, wait);
1138			if (n->m_next == NULL) {
1139				(void) m_free(n);
1140				return (NULL);
1141			} else {
1142				n->m_len = 0;
1143				return (n);
1144			}
1145		} else
1146			MH_ALIGN(n, remain);
1147	} else if (remain == 0) {
1148		n = m->m_next;
1149		m->m_next = NULL;
1150		return (n);
1151	} else {
1152		MGET(n, wait, m->m_type);
1153		if (n == NULL)
1154			return (NULL);
1155		M_ALIGN(n, remain);
1156	}
1157extpacket:
1158	if (m->m_flags & M_EXT) {
1159		n->m_data = m->m_data + len;
1160		mb_dupcl(n, m);
1161	} else {
1162		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1163	}
1164	n->m_len = remain;
1165	m->m_len = len;
1166	n->m_next = m->m_next;
1167	m->m_next = NULL;
1168	return (n);
1169}
1170/*
1171 * Routine to copy from device local memory into mbufs.
1172 * Note that the `off' argument is the offset into the first mbuf of the
1173 * target chain at which to begin copying the data.
1174 */
1175struct mbuf *
1176m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1177	 void (*copy)(char *from, caddr_t to, u_int len))
1178{
1179	struct mbuf *m;
1180	struct mbuf *top = NULL, **mp = &top;
1181	int len;
1182
1183	if (off < 0 || off > MHLEN)
1184		return (NULL);
1185
1186	while (totlen > 0) {
1187		if (top == NULL) {	/* First one, must be PKTHDR */
1188			if (totlen + off >= MINCLSIZE) {
1189				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1190				len = MCLBYTES;
1191			} else {
1192				m = m_gethdr(M_DONTWAIT, MT_DATA);
1193				len = MHLEN;
1194
1195				/* Place initial small packet/header at end of mbuf */
1196				if (m && totlen + off + max_linkhdr <= MLEN) {
1197					m->m_data += max_linkhdr;
1198					len -= max_linkhdr;
1199				}
1200			}
1201			if (m == NULL)
1202				return NULL;
1203			m->m_pkthdr.rcvif = ifp;
1204			m->m_pkthdr.len = totlen;
1205		} else {
1206			if (totlen + off >= MINCLSIZE) {
1207				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
1208				len = MCLBYTES;
1209			} else {
1210				m = m_get(M_DONTWAIT, MT_DATA);
1211				len = MLEN;
1212			}
1213			if (m == NULL) {
1214				m_freem(top);
1215				return NULL;
1216			}
1217		}
1218		if (off) {
1219			m->m_data += off;
1220			len -= off;
1221			off = 0;
1222		}
1223		m->m_len = len = min(totlen, len);
1224		if (copy)
1225			copy(buf, mtod(m, caddr_t), (u_int)len);
1226		else
1227			bcopy(buf, mtod(m, caddr_t), (u_int)len);
1228		buf += len;
1229		*mp = m;
1230		mp = &m->m_next;
1231		totlen -= len;
1232	}
1233	return (top);
1234}
1235
1236/*
1237 * Copy data from a buffer back into the indicated mbuf chain,
1238 * starting "off" bytes from the beginning, extending the mbuf
1239 * chain if necessary.
1240 */
1241void
1242m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1243{
1244	int mlen;
1245	struct mbuf *m = m0, *n;
1246	int totlen = 0;
1247
1248	if (m0 == NULL)
1249		return;
1250	while (off > (mlen = m->m_len)) {
1251		off -= mlen;
1252		totlen += mlen;
1253		if (m->m_next == NULL) {
1254			n = m_get(M_DONTWAIT, m->m_type);
1255			if (n == NULL)
1256				goto out;
1257			bzero(mtod(n, caddr_t), MLEN);
1258			n->m_len = min(MLEN, len + off);
1259			m->m_next = n;
1260		}
1261		m = m->m_next;
1262	}
1263	while (len > 0) {
1264		mlen = min(m->m_len - off, len);
1265		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1266		cp += mlen;
1267		len -= mlen;
1268		mlen += off;
1269		off = 0;
1270		totlen += mlen;
1271		if (len == 0)
1272			break;
1273		if (m->m_next == NULL) {
1274			n = m_get(M_DONTWAIT, m->m_type);
1275			if (n == NULL)
1276				break;
1277			n->m_len = min(MLEN, len);
1278			m->m_next = n;
1279		}
1280		m = m->m_next;
1281	}
1282out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1283		m->m_pkthdr.len = totlen;
1284}
1285
1286/*
1287 * Append the specified data to the indicated mbuf chain.
1288 * Extend the mbuf chain if the new data does not fit in
1289 * existing space.
1290 *
1291 * Return 1 if able to complete the job; otherwise 0.
1292 */
1293int
1294m_append(struct mbuf *m0, int len, c_caddr_t cp)
1295{
1296	struct mbuf *m, *n;
1297	int remainder, space;
1298
1299	for (m = m0; m->m_next != NULL; m = m->m_next)
1300		;
1301	remainder = len;
1302	space = M_TRAILINGSPACE(m);
1303	if (space > 0) {
1304		/*
1305		 * Copy into available space.
1306		 */
1307		if (space > remainder)
1308			space = remainder;
1309		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1310		m->m_len += space;
1311		cp += space, remainder -= space;
1312	}
1313	while (remainder > 0) {
1314		/*
1315		 * Allocate a new mbuf; could check space
1316		 * and allocate a cluster instead.
1317		 */
1318		n = m_get(M_DONTWAIT, m->m_type);
1319		if (n == NULL)
1320			break;
1321		n->m_len = min(MLEN, remainder);
1322		bcopy(cp, mtod(n, caddr_t), n->m_len);
1323		cp += n->m_len, remainder -= n->m_len;
1324		m->m_next = n;
1325		m = n;
1326	}
1327	if (m0->m_flags & M_PKTHDR)
1328		m0->m_pkthdr.len += len - remainder;
1329	return (remainder == 0);
1330}
1331
1332/*
1333 * Apply function f to the data in an mbuf chain starting "off" bytes from
1334 * the beginning, continuing for "len" bytes.
1335 */
1336int
1337m_apply(struct mbuf *m, int off, int len,
1338    int (*f)(void *, void *, u_int), void *arg)
1339{
1340	u_int count;
1341	int rval;
1342
1343	KASSERT(off >= 0, ("m_apply, negative off %d", off));
1344	KASSERT(len >= 0, ("m_apply, negative len %d", len));
1345	while (off > 0) {
1346		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1347		if (off < m->m_len)
1348			break;
1349		off -= m->m_len;
1350		m = m->m_next;
1351	}
1352	while (len > 0) {
1353		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1354		count = min(m->m_len - off, len);
1355		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1356		if (rval)
1357			return (rval);
1358		len -= count;
1359		off = 0;
1360		m = m->m_next;
1361	}
1362	return (0);
1363}
1364
1365/*
1366 * Return a pointer to mbuf/offset of location in mbuf chain.
1367 */
1368struct mbuf *
1369m_getptr(struct mbuf *m, int loc, int *off)
1370{
1371
1372	while (loc >= 0) {
1373		/* Normal end of search. */
1374		if (m->m_len > loc) {
1375			*off = loc;
1376			return (m);
1377		} else {
1378			loc -= m->m_len;
1379			if (m->m_next == NULL) {
1380				if (loc == 0) {
1381					/* Point at the end of valid data. */
1382					*off = m->m_len;
1383					return (m);
1384				}
1385				return (NULL);
1386			}
1387			m = m->m_next;
1388		}
1389	}
1390	return (NULL);
1391}
1392
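/*
 * Debugging aid: print each mbuf in the chain with its flags and up to
 * maxlen bytes of its data (maxlen of -1 means no per-mbuf limit).
 */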
1393void
1394m_print(const struct mbuf *m, int maxlen)
1395{
1396	int len;
1397	int pdata;
1398	const struct mbuf *m2;
1399
1400	if (m->m_flags & M_PKTHDR)
1401		len = m->m_pkthdr.len;
1402	else
1403		len = -1;
1404	m2 = m;
1405	while (m2 != NULL && (len == -1 || len)) {
1406		pdata = m2->m_len;
1407		if (maxlen != -1 && pdata > maxlen)
1408			pdata = maxlen;
1409		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1410		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1411		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1412		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1413		if (pdata)
1414			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1415		if (len != -1)
1416			len -= m2->m_len;
1417		m2 = m2->m_next;
1418	}
1419	if (len > 0)
1420		printf("%d bytes unaccounted for.\n", len);
1421	return;
1422}
1423
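/*
 * Recompute m_pkthdr.len as the total number of data bytes in the chain
 * and return that length.
 */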
1424u_int
1425m_fixhdr(struct mbuf *m0)
1426{
1427	u_int len;
1428
1429	len = m_length(m0, NULL);
1430	m0->m_pkthdr.len = len;
1431	return (len);
1432}
1433
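/*
 * Return the total number of data bytes in the chain; if "last" is not
 * NULL, also return a pointer to the chain's last mbuf through it.
 */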
1434u_int
1435m_length(struct mbuf *m0, struct mbuf **last)
1436{
1437	struct mbuf *m;
1438	u_int len;
1439
1440	len = 0;
1441	for (m = m0; m != NULL; m = m->m_next) {
1442		len += m->m_len;
1443		if (m->m_next == NULL)
1444			break;
1445	}
1446	if (last != NULL)
1447		*last = m;
1448	return (len);
1449}
1450
1451/*
1452 * Defragment an mbuf chain, returning the shortest possible
1453 * chain of mbufs and clusters.  If allocation fails and
1454 * this cannot be completed, NULL will be returned, but
1455 * the passed in chain will be unchanged.  Upon success,
1456 * the original chain will be freed, and the new chain
1457 * will be returned.
1458 *
1459 * If a non-packet-header mbuf is passed in, the original
1460 * mbuf (chain) will be returned unharmed.
1461 */
1462struct mbuf *
1463m_defrag(struct mbuf *m0, int how)
1464{
1465	struct mbuf *m_new = NULL, *m_final = NULL;
1466	int progress = 0, length;
1467
1468	MBUF_CHECKSLEEP(how);
1469	if (!(m0->m_flags & M_PKTHDR))
1470		return (m0);
1471
1472	m_fixhdr(m0); /* Needed sanity check */
1473
1474#ifdef MBUF_STRESS_TEST
1475	if (m_defragrandomfailures) {
1476		int temp = arc4random() & 0xff;
1477		if (temp == 0xba)
1478			goto nospace;
1479	}
1480#endif
1481
1482	if (m0->m_pkthdr.len > MHLEN)
1483		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1484	else
1485		m_final = m_gethdr(how, MT_DATA);
1486
1487	if (m_final == NULL)
1488		goto nospace;
1489
1490	if (m_dup_pkthdr(m_final, m0, how) == 0)
1491		goto nospace;
1492
1493	m_new = m_final;
1494
1495	while (progress < m0->m_pkthdr.len) {
1496		length = m0->m_pkthdr.len - progress;
1497		if (length > MCLBYTES)
1498			length = MCLBYTES;
1499
1500		if (m_new == NULL) {
1501			if (length > MLEN)
1502				m_new = m_getcl(how, MT_DATA, 0);
1503			else
1504				m_new = m_get(how, MT_DATA);
1505			if (m_new == NULL)
1506				goto nospace;
1507		}
1508
1509		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1510		progress += length;
1511		m_new->m_len = length;
1512		if (m_new != m_final)
1513			m_cat(m_final, m_new);
1514		m_new = NULL;
1515	}
1516#ifdef MBUF_STRESS_TEST
1517	if (m0->m_next == NULL)
1518		m_defraguseless++;
1519#endif
1520	m_freem(m0);
1521	m0 = m_final;
1522#ifdef MBUF_STRESS_TEST
1523	m_defragpackets++;
1524	m_defragbytes += m0->m_pkthdr.len;
1525#endif
1526	return (m0);
1527nospace:
1528#ifdef MBUF_STRESS_TEST
1529	m_defragfailure++;
1530#endif
1531	if (m_final)
1532		m_freem(m_final);
1533	return (NULL);
1534}
1535
1536#ifdef MBUF_STRESS_TEST
1537
1538/*
1539 * Fragment an mbuf chain.  There's no reason you'd ever want to do
1540 * this in normal usage, but it's great for stress testing various
1541 * mbuf consumers.
1542 *
1543 * If fragmentation is not possible, the original chain will be
1544 * returned.
1545 *
1546 * Possible length values:
1547 * 0	 no fragmentation will occur
1548 * > 0	each fragment will be of the specified length
1549 * -1	each fragment will be the same random value in length
1550 * -2	each fragment's length will be entirely random
1551 * (Random values range from 1 to 256)
1552 */
1553struct mbuf *
1554m_fragment(struct mbuf *m0, int how, int length)
1555{
1556	struct mbuf *m_new = NULL, *m_final = NULL;
1557	int progress = 0;
1558
1559	if (!(m0->m_flags & M_PKTHDR))
1560		return (m0);
1561
1562	if ((length == 0) || (length < -2))
1563		return (m0);
1564
1565	m_fixhdr(m0); /* Needed sanity check */
1566
1567	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1568
1569	if (m_final == NULL)
1570		goto nospace;
1571
1572	if (m_dup_pkthdr(m_final, m0, how) == 0)
1573		goto nospace;
1574
1575	m_new = m_final;
1576
1577	if (length == -1)
1578		length = 1 + (arc4random() & 255);
1579
1580	while (progress < m0->m_pkthdr.len) {
1581		int fraglen;
1582
1583		if (length > 0)
1584			fraglen = length;
1585		else
1586			fraglen = 1 + (arc4random() & 255);
1587		if (fraglen > m0->m_pkthdr.len - progress)
1588			fraglen = m0->m_pkthdr.len - progress;
1589
1590		if (fraglen > MCLBYTES)
1591			fraglen = MCLBYTES;
1592
1593		if (m_new == NULL) {
1594			m_new = m_getcl(how, MT_DATA, 0);
1595			if (m_new == NULL)
1596				goto nospace;
1597		}
1598
1599		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1600		progress += fraglen;
1601		m_new->m_len = fraglen;
1602		if (m_new != m_final)
1603			m_cat(m_final, m_new);
1604		m_new = NULL;
1605	}
1606	m_freem(m0);
1607	m0 = m_final;
1608	return (m0);
1609nospace:
1610	if (m_final)
1611		m_freem(m_final);
1612	/* Return the original chain on failure */
1613	return (m0);
1614}
1615
1616#endif
1617
1618/*
1619 * Copy the contents of uio into a properly sized mbuf chain.
1620 */
1621struct mbuf *
1622m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1623{
1624	struct mbuf *m, *mb;
1625	int error, length, total;
1626	int progress = 0;
1627
1628	/*
1629	 * len can be zero or an arbitrarily large value bounded by
1630	 * the total data supplied by the uio.
1631	 */
1632	if (len > 0)
1633		total = min(uio->uio_resid, len);
1634	else
1635		total = uio->uio_resid;
1636
1637	/*
1638	 * The smallest unit returned by m_getm2() is a single mbuf
1639	 * with pkthdr.  We can't align past it.  Align `align' itself to a long boundary.
1640	 */
1641	if (align)
1642		align &= ~(sizeof(long) - 1);
1643	if (align >= MHLEN)
1644		return (NULL);
1645
1646	/*
1647	 * Give us the full allocation or nothing.
1648	 * If len is zero return the smallest empty mbuf.
1649	 */
1650	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1651	if (m == NULL)
1652		return (NULL);
1653	m->m_data += align;
1654
1655	/* Fill all mbufs with uio data and update header information. */
1656	for (mb = m; mb != NULL; mb = mb->m_next) {
1657		length = min(M_TRAILINGSPACE(mb), total - progress);
1658
1659		error = uiomove(mtod(mb, void *), length, uio);
1660		if (error) {
1661			m_freem(m);
1662			return (NULL);
1663		}
1664
1665		mb->m_len = length;
1666		progress += length;
1667		if (flags & M_PKTHDR)
1668			m->m_pkthdr.len += length;
1669	}
1670	KASSERT(progress == total, ("%s: progress != total", __func__));
1671
1672	return (m);
1673}
1674
1675/*
1676 * Set the m_data pointer of a newly-allocated mbuf
1677 * to place an object of the specified size at the
1678 * end of the mbuf, longword aligned.
1679 */
1680void
1681m_align(struct mbuf *m, int len)
1682{
1683	int adjust;
1684
1685	if (m->m_flags & M_EXT)
1686		adjust = m->m_ext.ext_size - len;
1687	else if (m->m_flags & M_PKTHDR)
1688		adjust = MHLEN - len;
1689	else
1690		adjust = MLEN - len;
1691	m->m_data += adjust &~ (sizeof(long)-1);
1692}
1693
1694/*
1695 * Create a writable copy of the mbuf chain.  While doing this
1696 * we compact the chain with a goal of producing a chain with
1697 * at most two mbufs.  The second mbuf in this chain is likely
1698 * to be a cluster.  The primary purpose of this work is to create
1699 * a writable packet for encryption, compression, etc.  The
1700 * secondary goal is to linearize the data so the data can be
1701 * passed to crypto hardware in the most efficient manner possible.
1702 */
1703struct mbuf *
1704m_unshare(struct mbuf *m0, int how)
1705{
1706	struct mbuf *m, *mprev;
1707	struct mbuf *n, *mfirst, *mlast;
1708	int len, off;
1709
1710	mprev = NULL;
1711	for (m = m0; m != NULL; m = mprev->m_next) {
1712		/*
1713		 * Regular mbufs are ignored unless there's a cluster
1714		 * in front of them that we can use to coalesce.  We do
1715		 * the latter mainly so later clusters can be coalesced
1716		 * also w/o having to handle them specially (i.e. convert
1717		 * mbuf+cluster -> cluster).  This optimization is heavily
1718		 * influenced by the assumption that we're running over
1719		 * Ethernet where MCLBYTES is large enough that the max
1720		 * packet size will permit lots of coalescing into a
1721		 * single cluster.  This in turn permits efficient
1722		 * crypto operations, especially when using hardware.
1723		 */
1724		if ((m->m_flags & M_EXT) == 0) {
1725			if (mprev && (mprev->m_flags & M_EXT) &&
1726			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1727				/* XXX: this ignores mbuf types */
1728				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1729				       mtod(m, caddr_t), m->m_len);
1730				mprev->m_len += m->m_len;
1731				mprev->m_next = m->m_next;	/* unlink from chain */
1732				m_free(m);			/* reclaim mbuf */
1733#if 0
1734				newipsecstat.ips_mbcoalesced++;
1735#endif
1736			} else {
1737				mprev = m;
1738			}
1739			continue;
1740		}
1741		/*
1742		 * Writable mbufs are left alone (for now).
1743		 */
1744		if (M_WRITABLE(m)) {
1745			mprev = m;
1746			continue;
1747		}
1748
1749		/*
1750		 * Not writable, replace with a copy or coalesce with
1751		 * the previous mbuf if possible (since we have to copy
1752		 * it anyway, we try to reduce the number of mbufs and
1753		 * clusters so that future work is easier).
1754		 */
1755		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1756		/* NB: we only coalesce into a cluster or larger */
1757		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1758		    m->m_len <= M_TRAILINGSPACE(mprev)) {
1759			/* XXX: this ignores mbuf types */
1760			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1761			       mtod(m, caddr_t), m->m_len);
1762			mprev->m_len += m->m_len;
1763			mprev->m_next = m->m_next;	/* unlink from chain */
1764			m_free(m);			/* reclaim mbuf */
1765#if 0
1766			newipsecstat.ips_clcoalesced++;
1767#endif
1768			continue;
1769		}
1770
1771		/*
1772		 * Allocate new space to hold the copy...
1773		 */
1774		/* XXX why can M_PKTHDR be set past the first mbuf? */
1775		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
1776			/*
1777			 * NB: if a packet header is present we must
1778			 * allocate the mbuf separately from any cluster
1779			 * because M_MOVE_PKTHDR will smash the data
1780			 * pointer and drop the M_EXT marker.
1781			 */
1782			MGETHDR(n, how, m->m_type);
1783			if (n == NULL) {
1784				m_freem(m0);
1785				return (NULL);
1786			}
1787			M_MOVE_PKTHDR(n, m);
1788			MCLGET(n, how);
1789			if ((n->m_flags & M_EXT) == 0) {
1790				m_free(n);
1791				m_freem(m0);
1792				return (NULL);
1793			}
1794		} else {
1795			n = m_getcl(how, m->m_type, m->m_flags);
1796			if (n == NULL) {
1797				m_freem(m0);
1798				return (NULL);
1799			}
1800		}
1801		/*
1802		 * ... and copy the data.  We deal with jumbo mbufs
1803		 * (i.e. m_len > MCLBYTES) by splitting them into
1804		 * clusters.  We could just malloc a buffer and make
1805		 * it external but too many device drivers don't know
1806		 * how to break up the non-contiguous memory when
1807		 * doing DMA.
1808		 */
1809		len = m->m_len;
1810		off = 0;
1811		mfirst = n;
1812		mlast = NULL;
1813		for (;;) {
1814			int cc = min(len, MCLBYTES);
1815			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1816			n->m_len = cc;
1817			if (mlast != NULL)
1818				mlast->m_next = n;
1819			mlast = n;
1820#if 0
1821			newipsecstat.ips_clcopied++;
1822#endif
1823
1824			len -= cc;
1825			if (len <= 0)
1826				break;
1827			off += cc;
1828
1829			n = m_getcl(how, m->m_type, m->m_flags);
1830			if (n == NULL) {
1831				m_freem(mfirst);
1832				m_freem(m0);
1833				return (NULL);
1834			}
1835		}
1836		n->m_next = m->m_next;
1837		if (mprev == NULL)
1838			m0 = mfirst;		/* new head of chain */
1839		else
1840			mprev->m_next = mfirst;	/* replace old mbuf */
1841		m_free(m);			/* release old mbuf */
1842		mprev = mfirst;
1843	}
1844	return (m0);
1845}
1846