uipc_mbuf.c revision 177599
/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 177599 2008-03-25 09:39:02Z ru $");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
	   &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
	   &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
	   &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Bookkeeping. */
		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}
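
/*
 * Editor's note: the block below is an illustrative usage sketch, not part
 * of the original revision, and is kept under #if 0 so it has no effect on
 * the build.  It assumes only m_getm2()/m_freem() as defined above and the
 * M_DONTWAIT/MT_DATA/M_PKTHDR constants already used in this file.
 */
#if 0
static void
example_getm2_usage(void)
{
	struct mbuf *chain;

	/* Request 4 KB of buffer space; put a packet header on the first mbuf. */
	chain = m_getm2(NULL, 4096, M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (chain == NULL)
		return;			/* allocation failed; nothing to free */

	/* ... fill the chain, hand it to a protocol, etc. ... */

	m_freem(chain);			/* releases every mbuf and cluster in the chain */
}
#endif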

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1   The first argument (of any type) to be passed to the provided
 *           freef routine (may be NULL).
 *    arg2   The second argument (of any type) to be passed to the provided
 *           freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *arg1, void *arg2, int flags, int type)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	if (type != EXT_EXTREF)
		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_arg1 = arg1;
		mb->m_ext.ext_arg2 = arg2;
		mb->m_ext.ext_type = type;
	}
}
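
/*
 * Editor's note: illustrative sketch of attaching caller-managed external
 * storage with m_extadd(); not part of the original revision and kept under
 * #if 0.  The malloc type (M_DEVBUF) and the EXT_NET_DRV label are chosen
 * for the example only; arg1 carries the buffer so the free routine can
 * release it.
 */
#if 0
static void
example_ext_free(void *arg1, void *arg2)
{

	free(arg1, M_DEVBUF);
}

static struct mbuf *
example_extadd_usage(u_int size)
{
	struct mbuf *m;
	caddr_t buf;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	buf = malloc(size, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		m_freem(m);
		return (NULL);
	}
	m_extadd(m, buf, size, example_ext_free, buf, NULL, 0, EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {	/* refcount setup failed */
		free(buf, M_DEVBUF);
		m_freem(m);
		return (NULL);
	}
	return (m);
}
#endif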

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));


	/*
	 * Check if the header is embedded in the cluster.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*(m->m_ext.ref_cnt) == 1 ||
	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			if (*(m->m_ext.ref_cnt) == 0)
				*(m->m_ext.ref_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			*(m->m_ext.ref_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
				m->m_ext.ref_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
				("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m->m_ext.ext_arg1,
			    m->m_ext.ext_arg2);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
				("%s: unknown ext_type", __func__));
		}
	}
	if (skipmbuf)
		return;

	/*
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_arg2 = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
	uma_zfree(zone_mbuf, m);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_arg1 = m->m_ext.ext_arg1;
	n->m_ext.ext_arg2 = m->m_ext.ext_arg2;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		if (m->m_flags & M_PKTHDR) {
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
		}
		if (m->m_type == MT_HEADER)
			m->m_type = MT_DATA;
		if (m != m0 && m->m_nextpkt != NULL)
			m->m_nextpkt = NULL;
		m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 or panics when bad and 1 when all tests pass.
 * The sanitize argument: 0 to run M_SANITY_ACTION, 1 to garble things
 * so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
			 (caddr_t)(&m->m_dat)) );
		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
			if ((caddr_t)m->m_pkthdr.header < a ||
			    (caddr_t)m->m_pkthdr.header > b)
				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
		}

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}
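
/*
 * Editor's note: illustrative sketch only, not in the original revision and
 * kept under #if 0.  m_sanity() is typically wrapped in an assertion while
 * debugging a misbehaving driver or protocol; passing sanitize=0 lets
 * M_SANITY_ACTION report the problem instead of papering over it.
 */
#if 0
static void
example_sanity_check(struct mbuf *m)
{

	KASSERT(m_sanity(m, 0), ("%s: corrupt mbuf chain %p", __func__, m));
}
#endif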


/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR. Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}
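
/*
 * Editor's note: illustrative sketch, not part of the original revision and
 * kept under #if 0.  Callers normally reach m_prepend() through the
 * M_PREPEND() macro from <sys/mbuf.h>, which only takes this slower path
 * when there is no leading space in the first mbuf.  The 8-byte header size
 * is an arbitrary example value.
 */
#if 0
static struct mbuf *
example_prepend_usage(struct mbuf *m)
{

	M_PREPEND(m, 8, M_DONTWAIT);	/* frees the chain and sets m to NULL on failure */
	if (m == NULL)
		return (NULL);
	/* mtod(m, caddr_t) now points at 8 bytes of fresh header space. */
	return (m);
}
#endif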

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
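
/*
 * Editor's note: illustrative sketch, not part of the original revision and
 * kept under #if 0.  A common use of m_copym() is to take a read-only,
 * reference-counted copy of an entire packet (e.g. before handing the
 * original to a device) rather than duplicating the cluster data.
 */
#if 0
static struct mbuf *
example_copym_usage(struct mbuf *m)
{
	struct mbuf *n;

	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	/* n shares m's clusters; use m_dup() instead if a writable copy is needed. */
	return (n);
}
#endif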

/*
 * Returns mbuf chain with new head for the prepending case.
 * Copies from mbuf (chain) n from off for len to mbuf (chain) m
 * either prepending or appending the data.
 * The resulting mbuf (chain) m is fully writeable.
 * m is destination (is made writeable)
 * n is source, off is offset in source, len is len from offset
 * prep, 0 append, 1 prepend
 * how, wait or nowait
 */

static int
m_bcopyxxx(void *s, void *t, u_int len)
{
	bcopy(s, t, (size_t)len);
	return 0;
}

struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
	struct mbuf *mm, *x, *z, *prev = NULL;
	caddr_t p;
	int i, nlen = 0;
	caddr_t buf[MLEN];

	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

	mm = m;
	if (!prep) {
		while(mm->m_next) {
			prev = mm;
			mm = mm->m_next;
		}
	}
	for (z = n; z != NULL; z = z->m_next)
		nlen += z->m_len;
	if (len == M_COPYALL)
		len = nlen - off;
	if (off + len > nlen || len < 1)
		return NULL;

	if (!M_WRITABLE(mm)) {
		/* XXX: Use proper m_xxx function instead. */
		x = m_getcl(how, MT_DATA, mm->m_flags);
		if (x == NULL)
			return NULL;
		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
		x->m_data = p;
		mm->m_next = NULL;
		if (mm != m)
			prev->m_next = x;
		m_free(mm);
		mm = x;
	}

	/*
	 * Append/prepend the data.  Allocating mbufs as necessary.
	 */
	/* Shortcut if enough free space in first/last mbuf. */
	if (!prep && M_TRAILINGSPACE(mm) >= len) {
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
			 mm->m_len);
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return m;
	}
	if (prep && M_LEADINGSPACE(mm) >= len) {
		mm->m_data = mtod(mm, caddr_t) - len;
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return mm;
	}

	/* Expand first/last mbuf to cluster if possible. */
	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
		mm->m_data = mm->m_ext.ext_buf;
		mm->m_pkthdr.header = NULL;
	}
	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
			      mm->m_ext.ext_size - mm->m_len;
		mm->m_pkthdr.header = NULL;
	}

	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
	if (!prep && len > M_TRAILINGSPACE(mm)) {
		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
			return NULL;
	}
	if (prep && len > M_LEADINGSPACE(mm)) {
		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
			return NULL;
		i = 0;
		for (x = z; x != NULL; x = x->m_next) {
			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
			if (!x->m_next)
				break;
		}
		z->m_data += i - len;
		m_move_pkthdr(mm, z);
		x->m_next = mm;
		mm = z;
	}

	/* Seek to start position in source mbuf. Optimization for long chains. */
	while (off > 0) {
		if (off < n->m_len)
			break;
		off -= n->m_len;
		n = n->m_next;
	}

	/* Copy data into target mbuf. */
	z = mm;
	while (len > 0) {
		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
		i = M_TRAILINGSPACE(z);
		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
		z->m_len += i;
		/* fixup pkthdr.len if necessary */
		if ((prep ? mm : m)->m_flags & M_PKTHDR)
			(prep ? mm : m)->m_pkthdr.len += i;
		off += i;
		len -= i;
		z = z->m_next;
	}
	return (prep ? mm : m);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
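
/*
 * Editor's note: illustrative sketch, not part of the original revision and
 * kept under #if 0.  m_copydata() is the usual way to pull a fixed-size
 * header out of a chain into local storage without caring how the bytes are
 * split across mbufs.  The 20-byte length stands in for, e.g., a basic IPv4
 * header, and m is assumed to carry a packet header.
 */
#if 0
static void
example_copydata_usage(struct mbuf *m)
{
	char hdr[20];

	if (m->m_pkthdr.len >= (int)sizeof(hdr))
		m_copydata(m, 0, sizeof(hdr), hdr);
}
#endif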

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    	("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
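
/*
 * Editor's note: illustrative sketch, not part of the original revision and
 * kept under #if 0.  Positive lengths trim from the head of the chain,
 * negative lengths from the tail; the 14- and 4-byte figures below are the
 * usual Ethernet header and CRC sizes and are used purely as an example.
 */
#if 0
static void
example_adj_usage(struct mbuf *m)
{

	m_adj(m, 14);	/* strip a link-layer header from the front */
	m_adj(m, -4);	/* strip a trailing CRC from the end */
}
#endif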

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
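
/*
 * Editor's note: illustrative sketch, not part of the original revision and
 * kept under #if 0.  The classic m_pullup() idiom: make sure the first
 * "hdrlen" bytes are contiguous before casting m_data to a header structure.
 * On failure the chain has already been freed, so the caller must not touch
 * it again.
 */
#if 0
static struct mbuf *
example_pullup_usage(struct mbuf *m, int hdrlen)
{

	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);	/* chain was freed by m_pullup() */
	/* mtod(m, ...) is now safe for the first hdrlen bytes. */
	return (m);
}
#endif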

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
 bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of the
 * target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_DONTWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
1374123557Sbms/*
1375123557Sbms * Return a pointer to mbuf/offset of location in mbuf chain.
1376123557Sbms */
1377123557Sbmsstruct mbuf *
1378123557Sbmsm_getptr(struct mbuf *m, int loc, int *off)
1379123557Sbms{
1380123557Sbms
1381123557Sbms	while (loc >= 0) {
1382123564Sbms		/* Normal end of search. */
1383123557Sbms		if (m->m_len > loc) {
1384123557Sbms			*off = loc;
1385123557Sbms			return (m);
1386123557Sbms		} else {
1387123557Sbms			loc -= m->m_len;
1388123557Sbms			if (m->m_next == NULL) {
1389123557Sbms				if (loc == 0) {
1390123564Sbms					/* Point at the end of valid data. */
1391123557Sbms					*off = m->m_len;
1392123557Sbms					return (m);
1393123564Sbms				}
1394123564Sbms				return (NULL);
1395123564Sbms			}
1396123564Sbms			m = m->m_next;
1397123557Sbms		}
1398123557Sbms	}
1399123557Sbms	return (NULL);
1400123557Sbms}
1401123557Sbms
140252756Sphkvoid
1403135904Sjmgm_print(const struct mbuf *m, int maxlen)
140452756Sphk{
140552756Sphk	int len;
1406135904Sjmg	int pdata;
140754906Seivind	const struct mbuf *m2;
140852756Sphk
1409135904Sjmg	if (m->m_flags & M_PKTHDR)
1410135904Sjmg		len = m->m_pkthdr.len;
1411135904Sjmg	else
1412135904Sjmg		len = -1;
141352756Sphk	m2 = m;
1414135904Sjmg	while (m2 != NULL && (len == -1 || len)) {
1415135904Sjmg		pdata = m2->m_len;
1416135904Sjmg		if (maxlen != -1 && pdata > maxlen)
1417135904Sjmg			pdata = maxlen;
1418135904Sjmg		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1419135904Sjmg		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1420135904Sjmg		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1421135904Sjmg		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1422135904Sjmg		if (pdata)
1423156700Sjmg			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1424135904Sjmg		if (len != -1)
1425135904Sjmg			len -= m2->m_len;
142652756Sphk		m2 = m2->m_next;
142752756Sphk	}
1428135904Sjmg	if (len > 0)
1429135904Sjmg		printf("%d bytes unaccounted for.\n", len);
143052756Sphk	return;
143152756Sphk}
1432103540Sphk
1433103569Sbmilekicu_int
1434103540Sphkm_fixhdr(struct mbuf *m0)
1435103540Sphk{
1436103569Sbmilekic	u_int len;
1437103540Sphk
1438103544Sphk	len = m_length(m0, NULL);
1439103544Sphk	m0->m_pkthdr.len = len;
1440103544Sphk	return (len);
1441103544Sphk}
1442103544Sphk
1443103569Sbmilekicu_int
1444103544Sphkm_length(struct mbuf *m0, struct mbuf **last)
1445103544Sphk{
1446103544Sphk	struct mbuf *m;
1447103569Sbmilekic	u_int len;
1448103544Sphk
1449103544Sphk	len = 0;
1450103544Sphk	for (m = m0; m != NULL; m = m->m_next) {
1451103540Sphk		len += m->m_len;
1452103544Sphk		if (m->m_next == NULL)
1453103544Sphk			break;
1454103540Sphk	}
1455103544Sphk	if (last != NULL)
1456103544Sphk		*last = m;
1457103544Sphk	return (len);
1458103540Sphk}
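
/*
 * Illustrative sketch (not part of the original file): m_length() is the
 * chain walk behind m_fixhdr(); it can also hand back the last mbuf,
 * which is useful before appending in place.  The function name below is
 * hypothetical.
 */
#if 0
static void
example_resync_header(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	if ((m0->m_flags & M_PKTHDR) && m0->m_pkthdr.len != (int)len)
		m0->m_pkthdr.len = len;		/* what m_fixhdr() does */
}
#endif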
1459112777Ssilby
1460112777Ssilby/*
1461112777Ssilby * Defragment a mbuf chain, returning the shortest possible
1462112777Ssilby * chain of mbufs and clusters.  If allocation fails and
1463112777Ssilby * this cannot be completed, NULL will be returned, but
1464112777Ssilby * the passed in chain will be unchanged.  Upon success,
1465112777Ssilby * the original chain will be freed, and the new chain
1466112777Ssilby * will be returned.
1467112777Ssilby *
1468112777Ssilby * If a non-packet header is passed in, the original
1469112777Ssilby * mbuf (chain?) will be returned unharmed.
1470112777Ssilby */
1471112777Ssilbystruct mbuf *
1472112777Ssilbym_defrag(struct mbuf *m0, int how)
1473112777Ssilby{
1474125472Ssilby	struct mbuf *m_new = NULL, *m_final = NULL;
1475125472Ssilby	int progress = 0, length;
1476112777Ssilby
1477132488Salfred	MBUF_CHECKSLEEP(how);
1478112777Ssilby	if (!(m0->m_flags & M_PKTHDR))
1479112777Ssilby		return (m0);
1480112777Ssilby
1481117770Ssilby	m_fixhdr(m0); /* Needed sanity check */
1482117770Ssilby
1483113490Ssilby#ifdef MBUF_STRESS_TEST
1484113490Ssilby	if (m_defragrandomfailures) {
1485113490Ssilby		int temp = arc4random() & 0xff;
1486113490Ssilby		if (temp == 0xba)
1487113490Ssilby			goto nospace;
1488113490Ssilby	}
1489113490Ssilby#endif
1490112777Ssilby
1491112777Ssilby	if (m0->m_pkthdr.len > MHLEN)
1492112777Ssilby		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1493112777Ssilby	else
1494112777Ssilby		m_final = m_gethdr(how, MT_DATA);
1495112777Ssilby
1496112777Ssilby	if (m_final == NULL)
1497112777Ssilby		goto nospace;
1498112777Ssilby
1499123740Speter	if (m_dup_pkthdr(m_final, m0, how) == 0)
1500112777Ssilby		goto nospace;
1501112777Ssilby
1502112777Ssilby	m_new = m_final;
1503112777Ssilby
1504112777Ssilby	while (progress < m0->m_pkthdr.len) {
1505112777Ssilby		length = m0->m_pkthdr.len - progress;
1506112777Ssilby		if (length > MCLBYTES)
1507112777Ssilby			length = MCLBYTES;
1508112777Ssilby
1509112777Ssilby		if (m_new == NULL) {
1510112777Ssilby			if (length > MLEN)
1511112777Ssilby				m_new = m_getcl(how, MT_DATA, 0);
1512112777Ssilby			else
1513112777Ssilby				m_new = m_get(how, MT_DATA);
1514112777Ssilby			if (m_new == NULL)
1515112777Ssilby				goto nospace;
1516112777Ssilby		}
1517112777Ssilby
1518112777Ssilby		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1519112777Ssilby		progress += length;
1520112777Ssilby		m_new->m_len = length;
1521112777Ssilby		if (m_new != m_final)
1522112777Ssilby			m_cat(m_final, m_new);
1523112777Ssilby		m_new = NULL;
1524112777Ssilby	}
1525116455Ssilby#ifdef MBUF_STRESS_TEST
1526112777Ssilby	if (m0->m_next == NULL)
1527112777Ssilby		m_defraguseless++;
1528116455Ssilby#endif
1529112777Ssilby	m_freem(m0);
1530112777Ssilby	m0 = m_final;
1531116455Ssilby#ifdef MBUF_STRESS_TEST
1532112777Ssilby	m_defragpackets++;
1533112777Ssilby	m_defragbytes += m0->m_pkthdr.len;
1534116455Ssilby#endif
1535112777Ssilby	return (m0);
1536112777Ssilbynospace:
1537116455Ssilby#ifdef MBUF_STRESS_TEST
1538112777Ssilby	m_defragfailure++;
1539116455Ssilby#endif
1540112777Ssilby	if (m_final)
1541112777Ssilby		m_freem(m_final);
1542112777Ssilby	return (NULL);
1543112777Ssilby}
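
/*
 * Illustrative sketch (not part of the original file): the common driver
 * pattern of falling back to m_defrag() when a transmit chain has more
 * pieces than the hardware can DMA.  The mbuf count is used as a stand-in
 * for the real segment count; "EXAMPLE_MAXSEGS" and the function name are
 * hypothetical.
 */
#if 0
#define	EXAMPLE_MAXSEGS	8

static struct mbuf *
example_tx_defrag(struct mbuf *m0)
{
	struct mbuf *m, *n;
	int nsegs;

	nsegs = 0;
	for (m = m0; m != NULL; m = m->m_next)
		nsegs++;
	if (nsegs <= EXAMPLE_MAXSEGS)
		return (m0);
	n = m_defrag(m0, M_DONTWAIT);
	if (n == NULL) {
		m_freem(m0);	/* the original chain is intact on failure */
		return (NULL);
	}
	return (n);		/* m_defrag() already freed the original */
}
#endif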
1544119644Ssilby
1545175414Ssam/*
1546175414Ssam * Defragment an mbuf chain, returning at most maxfrags separate
1547175414Ssam * mbufs+clusters.  If this is not possible NULL is returned and
1548175414Ssam * the original mbuf chain is left in its present (potentially
1549175414Ssam * modified) state.  We use two techniques: collapsing consecutive
1550175414Ssam * mbufs and replacing consecutive mbufs by a cluster.
1551175414Ssam *
1552175414Ssam * NB: this should really be named m_defrag but that name is taken
1553175414Ssam */
1554175414Ssamstruct mbuf *
1555175414Ssamm_collapse(struct mbuf *m0, int how, int maxfrags)
1556175414Ssam{
1557175414Ssam	struct mbuf *m, *n, *n2, **prev;
1558175414Ssam	u_int curfrags;
1559175414Ssam
1560175414Ssam	/*
1561175414Ssam	 * Calculate the current number of frags.
1562175414Ssam	 */
1563175414Ssam	curfrags = 0;
1564175414Ssam	for (m = m0; m != NULL; m = m->m_next)
1565175414Ssam		curfrags++;
1566175414Ssam	/*
1567175414Ssam	 * First, try to collapse mbufs.  Note that we always collapse
1568175414Ssam	 * towards the front so we don't need to deal with moving the
1569175414Ssam	 * pkthdr.  This may be suboptimal if the first mbuf has much
1570175414Ssam	 * less data than the following.
1571175414Ssam	 */
1572175414Ssam	m = m0;
1573175414Ssamagain:
1574175414Ssam	for (;;) {
1575175414Ssam		n = m->m_next;
1576175414Ssam		if (n == NULL)
1577175414Ssam			break;
1578175414Ssam		if ((m->m_flags & M_RDONLY) == 0 &&
1579175414Ssam		    n->m_len < M_TRAILINGSPACE(m)) {
1580175414Ssam			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1581175414Ssam				n->m_len);
1582175414Ssam			m->m_len += n->m_len;
1583175414Ssam			m->m_next = n->m_next;
1584175414Ssam			m_free(n);
1585175414Ssam			if (--curfrags <= maxfrags)
1586175414Ssam				return m0;
1587175414Ssam		} else
1588175414Ssam			m = n;
1589175414Ssam	}
1590175414Ssam	KASSERT(maxfrags > 1,
1591175414Ssam		("maxfrags %u, but normal collapse failed", maxfrags));
1592175414Ssam	/*
1593175414Ssam	 * Collapse consecutive mbufs to a cluster.
1594175414Ssam	 */
1595175414Ssam	prev = &m0->m_next;		/* NB: not the first mbuf */
1596175414Ssam	while ((n = *prev) != NULL) {
1597175414Ssam		if ((n2 = n->m_next) != NULL &&
1598175414Ssam		    n->m_len + n2->m_len < MCLBYTES) {
1599175414Ssam			m = m_getcl(how, MT_DATA, 0);
1600175414Ssam			if (m == NULL)
1601175414Ssam				goto bad;
1602175414Ssam			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1603175414Ssam			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1604175414Ssam				n2->m_len);
1605175414Ssam			m->m_len = n->m_len + n2->m_len;
1606175414Ssam			m->m_next = n2->m_next;
1607175414Ssam			*prev = m;
1608175414Ssam			m_free(n);
1609175414Ssam			m_free(n2);
1610175414Ssam			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
1611175414Ssam				return m0;
1612175414Ssam			/*
1613175414Ssam			 * Still not there, try the normal collapse
1614175414Ssam			 * again before we allocate another cluster.
1615175414Ssam			 */
1616175414Ssam			goto again;
1617175414Ssam		}
1618175414Ssam		prev = &n->m_next;
1619175414Ssam	}
1620175414Ssam	/*
1621175414Ssam	 * No place where we can collapse to a cluster; punt.
1622175414Ssam	 * This can occur if, for example, you request 2 frags
1623175414Ssam	 * but the packet requires that both be clusters (we
1624175414Ssam	 * never reallocate the first mbuf to avoid moving the
1625175414Ssam	 * packet header).
1626175414Ssam	 */
1627175414Ssambad:
1628175414Ssam	return NULL;
1629175414Ssam}
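
/*
 * Illustrative sketch (not part of the original file): m_collapse() as a
 * cheaper first attempt before a full m_defrag() copy, as transmit paths
 * commonly do.  "maxsegs" is assumed to be at least 2 (see the KASSERT
 * above); the function name is hypothetical.
 */
#if 0
static struct mbuf *
example_limit_frags(struct mbuf *m0, int maxsegs)
{
	struct mbuf *n;

	n = m_collapse(m0, M_DONTWAIT, maxsegs);
	if (n != NULL)
		return (n);
	/* Chain is still valid (possibly shortened); copy it instead. */
	n = m_defrag(m0, M_DONTWAIT);
	if (n == NULL)
		m_freem(m0);
	return (n);
}
#endif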
1630175414Ssam
1631119644Ssilby#ifdef MBUF_STRESS_TEST
1632119644Ssilby
1633119644Ssilby/*
1634119644Ssilby * Fragment an mbuf chain.  There's no reason you'd ever want to do
1635119644Ssilby * this in normal usage, but it's great for stress testing various
1636119644Ssilby * mbuf consumers.
1637119644Ssilby *
1638119644Ssilby * If fragmentation is not possible, the original chain will be
1639119644Ssilby * returned.
1640119644Ssilby *
1641119644Ssilby * Possible length values:
1642119644Ssilby * 0	 no fragmentation will occur
1643119644Ssilby * > 0	each fragment will be of the specified length
1644119644Ssilby * -1	each fragment will be the same random value in length
1645119644Ssilby * -2	each fragment's length will be entirely random
1646119644Ssilby * (Random values range from 1 to 256)
1647119644Ssilby */
1648119644Ssilbystruct mbuf *
1649119644Ssilbym_fragment(struct mbuf *m0, int how, int length)
1650119644Ssilby{
1651125472Ssilby	struct mbuf *m_new = NULL, *m_final = NULL;
1652125472Ssilby	int progress = 0;
1653119644Ssilby
1654119644Ssilby	if (!(m0->m_flags & M_PKTHDR))
1655119644Ssilby		return (m0);
1656119644Ssilby
1657119644Ssilby	if ((length == 0) || (length < -2))
1658119644Ssilby		return (m0);
1659119644Ssilby
1660119644Ssilby	m_fixhdr(m0); /* Needed sanity check */
1661119644Ssilby
1662119644Ssilby	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1663119644Ssilby
1664119644Ssilby	if (m_final == NULL)
1665119644Ssilby		goto nospace;
1666119644Ssilby
1667123823Ssilby	if (m_dup_pkthdr(m_final, m0, how) == 0)
1668119644Ssilby		goto nospace;
1669119644Ssilby
1670119644Ssilby	m_new = m_final;
1671119644Ssilby
1672119644Ssilby	if (length == -1)
1673119644Ssilby		length = 1 + (arc4random() & 255);
1674119644Ssilby
1675119644Ssilby	while (progress < m0->m_pkthdr.len) {
1676119644Ssilby		int fraglen;
1677119644Ssilby
1678119644Ssilby		if (length > 0)
1679119644Ssilby			fraglen = length;
1680119644Ssilby		else
1681119644Ssilby			fraglen = 1 + (arc4random() & 255);
1682119644Ssilby		if (fraglen > m0->m_pkthdr.len - progress)
1683119644Ssilby			fraglen = m0->m_pkthdr.len - progress;
1684119644Ssilby
1685119644Ssilby		if (fraglen > MCLBYTES)
1686119644Ssilby			fraglen = MCLBYTES;
1687119644Ssilby
1688119644Ssilby		if (m_new == NULL) {
1689119644Ssilby			m_new = m_getcl(how, MT_DATA, 0);
1690119644Ssilby			if (m_new == NULL)
1691119644Ssilby				goto nospace;
1692119644Ssilby		}
1693119644Ssilby
1694119644Ssilby		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1695119644Ssilby		progress += fraglen;
1696119644Ssilby		m_new->m_len = fraglen;
1697119644Ssilby		if (m_new != m_final)
1698119644Ssilby			m_cat(m_final, m_new);
1699119644Ssilby		m_new = NULL;
1700119644Ssilby	}
1701119644Ssilby	m_freem(m0);
1702119644Ssilby	m0 = m_final;
1703119644Ssilby	return (m0);
1704119644Ssilbynospace:
1705119644Ssilby	if (m_final)
1706119644Ssilby		m_freem(m_final);
1707119644Ssilby	/* Return the original chain on failure */
1708119644Ssilby	return (m0);
1709119644Ssilby}
1710119644Ssilby
1711119644Ssilby#endif
1712125296Ssilby
1713163915Sandre/*
1714163915Sandre * Copy the contents of uio into a properly sized mbuf chain.
1715163915Sandre */
1716125296Ssilbystruct mbuf *
1717163915Sandrem_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1718125296Ssilby{
1719163915Sandre	struct mbuf *m, *mb;
1720163915Sandre	int error, length, total;
1721163915Sandre	int progress = 0;
1722125296Ssilby
1723163915Sandre	/*
1724163915Sandre	 * len can be zero or an arbitrarily large value bounded by
1725163915Sandre	 * the total data supplied by the uio.
1726163915Sandre	 */
1727125296Ssilby	if (len > 0)
1728125296Ssilby		total = min(uio->uio_resid, len);
1729125296Ssilby	else
1730125296Ssilby		total = uio->uio_resid;
1731163915Sandre
1732163915Sandre	/*
1733163915Sandre	 * The smallest unit returned by m_getm2() is a single mbuf
1734163915Sandre	 * with pkthdr.  We can't align past it.  Round the align
1734163915Sandre	 * argument itself down to a longword boundary.
1735163915Sandre	 */
1736163915Sandre	if (align)
1737163915Sandre		align &= ~(sizeof(long) - 1);
1738145883Semax	if (align >= MHLEN)
1739163915Sandre		return (NULL);
1740163915Sandre
1741166171Sandre	/*
1742166171Sandre	 * Give us the full allocation or nothing.
1743166171Sandre	 * If len is zero return the smallest empty mbuf.
1744166171Sandre	 */
1745166171Sandre	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1746163915Sandre	if (m == NULL)
1747163915Sandre		return (NULL);
1748163915Sandre	m->m_data += align;
1749163915Sandre
1750163915Sandre	/* Fill all mbufs with uio data and update header information. */
1751163915Sandre	for (mb = m; mb != NULL; mb = mb->m_next) {
1752163915Sandre		length = min(M_TRAILINGSPACE(mb), total - progress);
1753163915Sandre
1754163915Sandre		error = uiomove(mtod(mb, void *), length, uio);
1755163915Sandre		if (error) {
1756163915Sandre			m_freem(m);
1757163915Sandre			return (NULL);
1758125296Ssilby		}
1759163915Sandre
1760163915Sandre		mb->m_len = length;
1761125296Ssilby		progress += length;
1762163915Sandre		if (flags & M_PKTHDR)
1763163915Sandre			m->m_pkthdr.len += length;
1764125296Ssilby	}
1765163915Sandre	KASSERT(progress == total, ("%s: progress != total", __func__));
1766163915Sandre
1767163915Sandre	return (m);
1768125296Ssilby}
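
/*
 * Illustrative sketch (not part of the original file): building a
 * packet-header chain from user data described by a struct uio, leaving
 * room at the front for a link-layer header to be prepended later.
 * "EXAMPLE_HDRLEN" and the function name are hypothetical.
 */
#if 0
#define	EXAMPLE_HDRLEN	16

static struct mbuf *
example_uio_to_pkt(struct uio *uio)
{

	/* len == 0 takes everything the uio describes. */
	return (m_uiotombuf(uio, M_DONTWAIT, 0, EXAMPLE_HDRLEN, M_PKTHDR));
}
#endif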
1769148552Ssam
1770148552Ssam/*
1771148552Ssam * Set the m_data pointer of a newly-allocated mbuf
1772148552Ssam * to place an object of the specified size at the
1773148552Ssam * end of the mbuf, longword aligned.
1774148552Ssam */
1775148552Ssamvoid
1776148552Ssamm_align(struct mbuf *m, int len)
1777148552Ssam{
1778148552Ssam	int adjust;
1779148552Ssam
1780148552Ssam	if (m->m_flags & M_EXT)
1781148552Ssam		adjust = m->m_ext.ext_size - len;
1782148552Ssam	else if (m->m_flags & M_PKTHDR)
1783148552Ssam		adjust = MHLEN - len;
1784148552Ssam	else
1785148552Ssam		adjust = MLEN - len;
1786148552Ssam	m->m_data += adjust &~ (sizeof(long)-1);
1787148552Ssam}
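
/*
 * Illustrative sketch (not part of the original file): reserving space at
 * the end of a freshly allocated mbuf so that a fixed-size structure ends
 * up longword aligned.  m_align() must be called before any data is
 * placed in the mbuf; "struct example_hdr" and the function name are
 * hypothetical.
 */
#if 0
struct example_hdr {
	uint32_t	eh_type;
	uint32_t	eh_len;
};

static struct mbuf *
example_build_hdr(uint32_t type, uint32_t len)
{
	struct example_hdr *eh;
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_align(m, sizeof(*eh));
	eh = mtod(m, struct example_hdr *);
	eh->eh_type = type;
	eh->eh_len = len;
	m->m_len = m->m_pkthdr.len = sizeof(*eh);
	return (m);
}
#endif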
1788156756Ssam
1789156756Ssam/*
1790156756Ssam * Create a writable copy of the mbuf chain.  While doing this
1791156756Ssam * we compact the chain with a goal of producing a chain with
1792156756Ssam * at most two mbufs.  The second mbuf in this chain is likely
1793156756Ssam * to be a cluster.  The primary purpose of this work is to create
1794156756Ssam * a writable packet for encryption, compression, etc.  The
1795156756Ssam * secondary goal is to linearize the data so the data can be
1796156756Ssam * passed to crypto hardware in the most efficient manner possible.
1797156756Ssam */
1798156756Ssamstruct mbuf *
1799156756Ssamm_unshare(struct mbuf *m0, int how)
1800156756Ssam{
1801156756Ssam	struct mbuf *m, *mprev;
1802156756Ssam	struct mbuf *n, *mfirst, *mlast;
1803156756Ssam	int len, off;
1804156756Ssam
1805156756Ssam	mprev = NULL;
1806156756Ssam	for (m = m0; m != NULL; m = mprev->m_next) {
1807156756Ssam		/*
1808156756Ssam		 * Regular mbufs are ignored unless there's a cluster
1809156756Ssam		 * in front of them that we can use to coalesce.  We do
1810156756Ssam		 * the latter mainly so later clusters can be coalesced
1811156756Ssam		 * also w/o having to handle them specially (i.e. convert
1812156756Ssam		 * mbuf+cluster -> cluster).  This optimization is heavily
1813156756Ssam		 * influenced by the assumption that we're running over
1814156756Ssam		 * Ethernet where MCLBYTES is large enough that the max
1815156756Ssam		 * packet size will permit lots of coalescing into a
1816156756Ssam		 * single cluster.  This in turn permits efficient
1817156756Ssam		 * crypto operations, especially when using hardware.
1818156756Ssam		 */
1819156756Ssam		if ((m->m_flags & M_EXT) == 0) {
1820156756Ssam			if (mprev && (mprev->m_flags & M_EXT) &&
1821156756Ssam			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1822156756Ssam				/* XXX: this ignores mbuf types */
1823156756Ssam				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1824156756Ssam				       mtod(m, caddr_t), m->m_len);
1825156756Ssam				mprev->m_len += m->m_len;
1826156756Ssam				mprev->m_next = m->m_next;	/* unlink from chain */
1827156756Ssam				m_free(m);			/* reclaim mbuf */
1828156756Ssam#if 0
1829156756Ssam				newipsecstat.ips_mbcoalesced++;
1830156756Ssam#endif
1831156756Ssam			} else {
1832156756Ssam				mprev = m;
1833156756Ssam			}
1834156756Ssam			continue;
1835156756Ssam		}
1836156756Ssam		/*
1837156756Ssam		 * Writable mbufs are left alone (for now).
1838156756Ssam		 */
1839156756Ssam		if (M_WRITABLE(m)) {
1840156756Ssam			mprev = m;
1841156756Ssam			continue;
1842156756Ssam		}
1843156756Ssam
1844156756Ssam		/*
1845156756Ssam		 * Not writable, replace with a copy or coalesce with
1846156756Ssam		 * the previous mbuf if possible (since we have to copy
1847156756Ssam		 * it anyway, we try to reduce the number of mbufs and
1848156756Ssam		 * clusters so that future work is easier).
1849156756Ssam		 */
1850156756Ssam		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1851156756Ssam		/* NB: we only coalesce into a cluster or larger */
1852156756Ssam		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1853156756Ssam		    m->m_len <= M_TRAILINGSPACE(mprev)) {
1854156756Ssam			/* XXX: this ignores mbuf types */
1855156756Ssam			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1856156756Ssam			       mtod(m, caddr_t), m->m_len);
1857156756Ssam			mprev->m_len += m->m_len;
1858156756Ssam			mprev->m_next = m->m_next;	/* unlink from chain */
1859156756Ssam			m_free(m);			/* reclaim mbuf */
1860156756Ssam#if 0
1861156756Ssam			newipsecstat.ips_clcoalesced++;
1862156756Ssam#endif
1863156756Ssam			continue;
1864156756Ssam		}
1865156756Ssam
1866156756Ssam		/*
1867156756Ssam		 * Allocate new space to hold the copy...
1868156756Ssam		 */
1869156756Ssam		/* XXX why can M_PKTHDR be set past the first mbuf? */
1870156756Ssam		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
1871156756Ssam			/*
1872156756Ssam			 * NB: if a packet header is present we must
1873156756Ssam			 * allocate the mbuf separately from any cluster
1874156756Ssam			 * because M_MOVE_PKTHDR will smash the data
1875156756Ssam			 * pointer and drop the M_EXT marker.
1876156756Ssam			 */
1877156756Ssam			MGETHDR(n, how, m->m_type);
1878156756Ssam			if (n == NULL) {
1879156756Ssam				m_freem(m0);
1880156756Ssam				return (NULL);
1881156756Ssam			}
1882156756Ssam			M_MOVE_PKTHDR(n, m);
1883156756Ssam			MCLGET(n, how);
1884156756Ssam			if ((n->m_flags & M_EXT) == 0) {
1885156756Ssam				m_free(n);
1886156756Ssam				m_freem(m0);
1887156756Ssam				return (NULL);
1888156756Ssam			}
1889156756Ssam		} else {
1890156756Ssam			n = m_getcl(how, m->m_type, m->m_flags);
1891156756Ssam			if (n == NULL) {
1892156756Ssam				m_freem(m0);
1893156756Ssam				return (NULL);
1894156756Ssam			}
1895156756Ssam		}
1896156756Ssam		/*
1897156756Ssam		 * ... and copy the data.  We deal with jumbo mbufs
1898156756Ssam		 * (i.e. m_len > MCLBYTES) by splitting them into
1899156756Ssam		 * clusters.  We could just malloc a buffer and make
1900156756Ssam		 * it external but too many device drivers don't know
1901156756Ssam		 * how to break up the non-contiguous memory when
1902156756Ssam		 * doing DMA.
1903156756Ssam		 */
1904156756Ssam		len = m->m_len;
1905156756Ssam		off = 0;
1906156756Ssam		mfirst = n;
1907156756Ssam		mlast = NULL;
1908156756Ssam		for (;;) {
1909156756Ssam			int cc = min(len, MCLBYTES);
1910156756Ssam			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1911156756Ssam			n->m_len = cc;
1912156756Ssam			if (mlast != NULL)
1913156756Ssam				mlast->m_next = n;
1914156756Ssam			mlast = n;
1915156756Ssam#if 0
1916156756Ssam			newipsecstat.ips_clcopied++;
1917156756Ssam#endif
1918156756Ssam
1919156756Ssam			len -= cc;
1920156756Ssam			if (len <= 0)
1921156756Ssam				break;
1922156756Ssam			off += cc;
1923156756Ssam
1924156756Ssam			n = m_getcl(how, m->m_type, m->m_flags);
1925156756Ssam			if (n == NULL) {
1926156756Ssam				m_freem(mfirst);
1927156756Ssam				m_freem(m0);
1928156756Ssam				return (NULL);
1929156756Ssam			}
1930156756Ssam		}
1931156756Ssam		n->m_next = m->m_next;
1932156756Ssam		if (mprev == NULL)
1933156756Ssam			m0 = mfirst;		/* new head of chain */
1934156756Ssam		else
1935156756Ssam			mprev->m_next = mfirst;	/* replace old mbuf */
1936156756Ssam		m_free(m);			/* release old mbuf */
1937156756Ssam		mprev = mfirst;
1938156756Ssam	}
1939156756Ssam	return (m0);
1940156756Ssam}
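
/*
 * Illustrative sketch (not part of the original file): the IPsec-style
 * use of m_unshare() to obtain a chain that can be modified in place
 * (e.g. encrypted) before transmission.  Note that on failure m_unshare()
 * has already freed the chain.  The function name is hypothetical.
 */
#if 0
static int
example_make_writable(struct mbuf **mp)
{
	struct mbuf *m;

	m = m_unshare(*mp, M_DONTWAIT);
	if (m == NULL) {
		*mp = NULL;		/* chain was freed by m_unshare() */
		return (ENOBUFS);
	}
	*mp = m;
	/* Every mbuf in the chain is now safe to write. */
	return (0);
}
#endif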
1941