/* uipc_mbuf.c revision 175414 */
1139804Simp/*-
21541Srgrimes * Copyright (c) 1982, 1986, 1988, 1991, 1993
31541Srgrimes *	The Regents of the University of California.  All rights reserved.
41541Srgrimes *
51541Srgrimes * Redistribution and use in source and binary forms, with or without
61541Srgrimes * modification, are permitted provided that the following conditions
71541Srgrimes * are met:
81541Srgrimes * 1. Redistributions of source code must retain the above copyright
91541Srgrimes *    notice, this list of conditions and the following disclaimer.
101541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
111541Srgrimes *    notice, this list of conditions and the following disclaimer in the
121541Srgrimes *    documentation and/or other materials provided with the distribution.
131541Srgrimes * 4. Neither the name of the University nor the names of its contributors
141541Srgrimes *    may be used to endorse or promote products derived from this software
151541Srgrimes *    without specific prior written permission.
161541Srgrimes *
171541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
181541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
191541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
201541Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
211541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
221541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
231541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
241541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
251541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
261541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
271541Srgrimes * SUCH DAMAGE.
281541Srgrimes *
291541Srgrimes *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
301541Srgrimes */
311541Srgrimes
32116182Sobrien#include <sys/cdefs.h>
33116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 175414 2008-01-17 21:25:09Z sam $");
34116182Sobrien
35101007Srwatson#include "opt_mac.h"
3677572Sobrien#include "opt_param.h"
37113490Ssilby#include "opt_mbuf_stress_test.h"
38101007Srwatson
391541Srgrimes#include <sys/param.h>
401541Srgrimes#include <sys/systm.h>
4176166Smarkm#include <sys/kernel.h>
42125296Ssilby#include <sys/limits.h>
4376166Smarkm#include <sys/lock.h>
4432036Sbde#include <sys/malloc.h>
451541Srgrimes#include <sys/mbuf.h>
4623081Swollman#include <sys/sysctl.h>
471541Srgrimes#include <sys/domain.h>
481541Srgrimes#include <sys/protosw.h>
49125296Ssilby#include <sys/uio.h>
5076166Smarkm
51163606Srwatson#include <security/mac/mac_framework.h>
52163606Srwatson
/* Worst-case header sizes; values are maintained elsewhere (read-only here). */
int	max_linkhdr;		/* size of largest link-layer header */
int	max_protohdr;		/* size of largest protocol-layer header */
int	max_hdr;		/* size of largest link + protocol header */
int	max_datalen;		/* minimum space left in mbuf after max_hdr */
#ifdef MBUF_STRESS_TEST
/* Defragmenter statistics — presumably updated by m_defrag(); not in view. */
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
	   &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
	   &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
	   &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
/* Read-only statistics, plus one RW knob to inject random failures. */
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif
8875112Sbmilekic
891541Srgrimes/*
90129906Sbmilekic * Allocate a given length worth of mbufs and/or clusters (whatever fits
91129906Sbmilekic * best) and return a pointer to the top of the allocated chain.  If an
92129906Sbmilekic * existing mbuf chain is provided, then we will append the new chain
93129906Sbmilekic * to the existing one but still return the top of the newly allocated
94129906Sbmilekic * chain.
95129906Sbmilekic */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags: only M_PKTHDR and M_EOR are honored here. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		/*
		 * Pick the largest allocation unit that the remaining
		 * length justifies: page-sized jumbo cluster, regular
		 * cluster, or a plain (header) mbuf.
		 */
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping: credit the full capacity of the new mbuf. */
		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	/*
	 * NOTE(review): if len == 0 and M_EOR was requested, mtail is
	 * still NULL here — callers presumably pass len > 0; verify.
	 */
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
153129906Sbmilekic
154129906Sbmilekic/*
155129906Sbmilekic * Free an entire chain of mbufs and associated external buffers, if
156129906Sbmilekic * applicable.
157129906Sbmilekic */
158129906Sbmilekicvoid
159129906Sbmilekicm_freem(struct mbuf *mb)
160129906Sbmilekic{
161129906Sbmilekic
162129906Sbmilekic	while (mb != NULL)
163129906Sbmilekic		mb = m_free(mb);
164129906Sbmilekic}
165129906Sbmilekic
166129906Sbmilekic/*-
167129906Sbmilekic * Configure a provided mbuf to refer to the provided external storage
168129906Sbmilekic * buffer and setup a reference count for said buffer.  If the setting
169129906Sbmilekic * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
171129906Sbmilekic *
172129906Sbmilekic * Arguments:
173129906Sbmilekic *    mb     The existing mbuf to which to attach the provided buffer.
174129906Sbmilekic *    buf    The address of the provided external storage buffer.
175129906Sbmilekic *    size   The size of the provided buffer.
176129906Sbmilekic *    freef  A pointer to a routine that is responsible for freeing the
177129906Sbmilekic *           provided external storage buffer.
178129906Sbmilekic *    args   A pointer to an argument structure (of any type) to be passed
179129906Sbmilekic *           to the provided freef routine (may be NULL).
180129906Sbmilekic *    flags  Any other flags to be passed to the provided mbuf.
181129906Sbmilekic *    type   The type that the external storage buffer should be
182129906Sbmilekic *           labeled with.
183129906Sbmilekic *
184129906Sbmilekic * Returns:
185129906Sbmilekic *    Nothing.
186129906Sbmilekic */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *args, int flags, int type)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	/*
	 * For EXT_EXTREF the reference counter is caller-supplied
	 * (mb->m_ext.ref_cnt is expected to be valid already); for all
	 * other types one is allocated from the refcount zone.
	 */
	if (type != EXT_EXTREF)
		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
	/* If no counter is available, M_EXT is deliberately left unset. */
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
        }
}
206129906Sbmilekic
207129906Sbmilekic/*
208129906Sbmilekic * Non-directly-exported function to clean up after mbufs with M_EXT
209151976Sandre * storage attached to them if the reference count hits 1.
210129906Sbmilekic */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));


	/*
	 * check if the header is embedded in the cluster: M_NOFREE means
	 * the mbuf structure itself must not be returned to its zone.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/*
	 * Free attached storage if this mbuf is the only reference to it.
	 * A count of 1 means sole ownership (no atomic op needed);
	 * otherwise drop our reference atomically and free only if we
	 * held the last one.
	 */
	if (*(m->m_ext.ref_cnt) == 1 ||
	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			/* Reset the count so the zone hands it out at 1. */
			if (*(m->m_ext.ref_cnt) == 0)
				*(m->m_ext.ref_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			/* Return the counter to its zone, then fall through
			 * to invoke the buffer's own free routine. */
			*(m->m_ext.ref_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
				m->m_ext.ref_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			/* Caller-managed storage: free routine is mandatory. */
			KASSERT(m->m_ext.ext_free != NULL,
				("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_args);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
				("%s: unknown ext_type", __func__));
		}
	}
	if (skipmbuf)
		return;

	/*
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
	uma_zfree(zone_mbuf, m);
}
281129906Sbmilekic
/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	/*
	 * A count of 1 means we hold the only reference, so a plain
	 * increment suffices; otherwise bump the count atomically.
	 */
	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	/* Mirror all of m's external-storage bookkeeping into n. */
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_args = m->m_ext.ext_args;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}
305151976Sandre
306151976Sandre/*
307149598Sandre * Clean up mbuf (chain) from any tags and packet headers.
308149647Sandre * If "all" is set then the first mbuf in the chain will be
309149647Sandre * cleaned too.
310149598Sandre */
void
m_demote(struct mbuf *m0, int all)
{
	struct mbuf *m;

	/* Start at the head only when "all" was requested. */
	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		if (m->m_flags & M_PKTHDR) {
			/* Drop the tags, then wipe the packet header. */
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
		}
		if (m->m_type == MT_HEADER)
			m->m_type = MT_DATA;
		if (m != m0 && m->m_nextpkt != NULL)
			m->m_nextpkt = NULL;
		/* Keep only flags that stay meaningful on a plain mbuf. */
		m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
	}
}
329149598Sandre
330149598Sandre/*
331149648Sandre * Sanity checks on mbuf (chain) for use in KASSERT() and general
332149648Sandre * debugging.
333149648Sandre * Returns 0 or panics when bad and 1 on all tests passed.
334149648Sandre * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
335149648Sandre * blow up later.
336149599Sandre */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

	/*
	 * NOTE(review): this function never actually returns 0.  A failed
	 * check either panics (INVARIANTS) or merely prints and continues,
	 * after which 1 is returned regardless.
	 */
#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		/* a/b bracket the valid data region of this mbuf. */
		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
			 (caddr_t)(&m->m_dat)) );
		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exeeds mbuf space");
		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
			if ((caddr_t)m->m_pkthdr.header < a ||
			    (caddr_t)m->m_pkthdr.header > b)
				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
		}

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	/* Cross-check the stored pkthdr.len against the summed chain length. */
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}
417149599Sandre
418149599Sandre
419149599Sandre/*
420108466Ssam * "Move" mbuf pkthdr from "from" to "to".
421100960Srwatson * "from" must have M_PKTHDR set, and "to" must be empty.
422100960Srwatson */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	/* Preserve the destination's M_EXT; take everything else from src. */
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}
448108466Ssam
449108466Ssam/*
450108466Ssam * Duplicate "from"'s mbuf pkthdr in "to".
451108466Ssam * "from" must have M_PKTHDR set, and "to" must be empty.
452108466Ssam * In particular, this does a deep copy of the packet tags.
453108466Ssam */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR. Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	/* Preserve the destination's M_EXT; copy the other flags from src. */
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	/* Start with an empty tag list, then deep-copy "from"'s tags.
	 * A nonzero return means success (callers test !m_dup_pkthdr()). */
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}
482100960Srwatson
483100960Srwatson/*
4841541Srgrimes * Lesser-used path for M_PREPEND:
4851541Srgrimes * allocate new mbuf to prepend to chain,
4861541Srgrimes * copy junk along.
4871541Srgrimes */
4881541Srgrimesstruct mbuf *
48972356Sbmilekicm_prepend(struct mbuf *m, int len, int how)
4901541Srgrimes{
4911541Srgrimes	struct mbuf *mn;
4921541Srgrimes
493117770Ssilby	if (m->m_flags & M_PKTHDR)
494117770Ssilby		MGETHDR(mn, how, m->m_type);
495117770Ssilby	else
496117770Ssilby		MGET(mn, how, m->m_type);
49772356Sbmilekic	if (mn == NULL) {
4981541Srgrimes		m_freem(m);
49972356Sbmilekic		return (NULL);
5001541Srgrimes	}
501113487Srwatson	if (m->m_flags & M_PKTHDR)
502108466Ssam		M_MOVE_PKTHDR(mn, m);
5031541Srgrimes	mn->m_next = m;
5041541Srgrimes	m = mn;
505165447Srrs	if(m->m_flags & M_PKTHDR) {
506165447Srrs		if (len < MHLEN)
507165447Srrs			MH_ALIGN(m, len);
508165447Srrs	} else {
509165447Srrs		if (len < MLEN)
510165447Srrs			M_ALIGN(m, len);
511165447Srrs	}
5121541Srgrimes	m->m_len = len;
5131541Srgrimes	return (m);
5141541Srgrimes}
5151541Srgrimes
5161541Srgrimes/*
5171541Srgrimes * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
5181541Srgrimes * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
519111119Simp * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
52054002Sarchie * Note that the copy is read-only, because clusters are not copied,
52154002Sarchie * only their reference counts are incremented.
5221541Srgrimes */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	/* Only a copy starting at offset 0 can inherit the packet header. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip over the leading "off" bytes of the source chain. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			/* Deep-copy the packet header and its m_tags. */
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster read-only; only bump refcount. */
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
5891541Srgrimes
5901541Srgrimes/*
591149602Sandre * Returns mbuf chain with new head for the prepending case.
592149602Sandre * Copies from mbuf (chain) n from off for len to mbuf (chain) m
593149602Sandre * either prepending or appending the data.
594149602Sandre * The resulting mbuf (chain) m is fully writeable.
595149602Sandre * m is destination (is made writeable)
596149602Sandre * n is source, off is offset in source, len is len from offset
597149602Sandre * dir, 0 append, 1 prepend
598149602Sandre * how, wait or nowait
599149602Sandre */
600149602Sandre
/*
 * bcopy() wrapper matching the copy-function signature m_apply()
 * expects; always reports success (0) so the traversal continues.
 */
static int
m_bcopyxxx(void *s, void *t, u_int len)
{
	bcopy(s, t, (size_t)len);
	return 0;
}
607149602Sandre
struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
	struct mbuf *mm, *x, *z, *prev = NULL;
	caddr_t p;
	int i, nlen = 0;
	/*
	 * NOTE(review): this is an array of MLEN pointers (MLEN *
	 * sizeof(caddr_t) bytes); a plain "char buf[MLEN]" would hold
	 * the at-most-MLEN-byte staging copies done below.
	 */
	caddr_t buf[MLEN];

	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

	/* For the append case, locate the last mbuf of the target chain. */
	mm = m;
	if (!prep) {
		while(mm->m_next) {
			prev = mm;
			mm = mm->m_next;
		}
	}
	/* Total source length; bound the requested region against it. */
	for (z = n; z != NULL; z = z->m_next)
		nlen += z->m_len;
	if (len == M_COPYALL)
		len = nlen - off;
	if (off + len > nlen || len < 1)
		return NULL;

	/*
	 * Replace a read-only target mbuf with a fresh writable
	 * cluster-backed copy.
	 * NOTE(review): when mm == m (single-mbuf chain) the freed "m"
	 * is still returned by the append paths below — looks like a
	 * use-after-free for a lone read-only mbuf; verify.
	 */
	if (!M_WRITABLE(mm)) {
		/* XXX: Use proper m_xxx function instead. */
		x = m_getcl(how, MT_DATA, mm->m_flags);
		if (x == NULL)
			return NULL;
		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
		x->m_data = p;
		mm->m_next = NULL;
		if (mm != m)
			prev->m_next = x;
		m_free(mm);
		mm = x;
	}

	/*
	 * Append/prepend the data.  Allocating mbufs as necessary.
	 */
	/* Shortcut if enough free space in first/last mbuf. */
	if (!prep && M_TRAILINGSPACE(mm) >= len) {
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
			 mm->m_len);
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return m;
	}
	if (prep && M_LEADINGSPACE(mm) >= len) {
		mm->m_data = mtod(mm, caddr_t) - len;
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return mm;
	}

	/* Expand first/last mbuf to cluster if possible. */
	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
		mm->m_data = mm->m_ext.ext_buf;
		mm->m_pkthdr.header = NULL;
	}
	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
		/* Park the existing data at the cluster's far end so the
		 * prepend space opens up in front of it. */
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
			      mm->m_ext.ext_size - mm->m_len;
		mm->m_pkthdr.header = NULL;
	}

	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
	if (!prep && len > M_TRAILINGSPACE(mm)) {
		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
			return NULL;
	}
	if (prep && len > M_LEADINGSPACE(mm)) {
		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
			return NULL;
		/* Sum the capacity of the new chain (i), then offset the
		 * first mbuf's data so exactly "len" bytes fit in front. */
		i = 0;
		for (x = z; x != NULL; x = x->m_next) {
			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
			if (!x->m_next)
				break;
		}
		z->m_data += i - len;
		m_move_pkthdr(mm, z);
		x->m_next = mm;
		mm = z;
	}

	/* Seek to start position in source mbuf. Optimization for long chains. */
	while (off > 0) {
		if (off < n->m_len)
			break;
		off -= n->m_len;
		n = n->m_next;
	}

	/* Copy data into target mbuf. */
	z = mm;
	while (len > 0) {
		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
		/*
		 * NOTE(review): "i" is the full trailing space of z and may
		 * exceed the remaining "len" — confirm m_apply() bounds the
		 * copy against the source length.
		 */
		i = M_TRAILINGSPACE(z);
		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
		z->m_len += i;
		/* fixup pkthdr.len if necessary */
		if ((prep ? mm : m)->m_flags & M_PKTHDR)
			(prep ? mm : m)->m_pkthdr.len += i;
		off += i;
		len -= i;
		z = z->m_next;
	}
	return (prep ? mm : m);
}
737149602Sandre
738149602Sandre/*
73915689Swollman * Copy an entire packet, including header (which must be present).
74015689Swollman * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
74154002Sarchie * Note that the copy is read-only, because clusters are not copied,
74254002Sarchie * only their reference counts are incremented.
74372750Sluigi * Preserve alignment of the first mbuf so if the creator has left
74472750Sluigi * some room at the beginning (e.g. for inserting protocol headers)
74572750Sluigi * the copies still have the room available.
74615689Swollman */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	/* Allocate the head of the copy; it must carry the packet header. */
	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	/* Duplicate the packet header (tags, flags, len) into the new head. */
	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* Share the cluster read-only: bump refcount, no data copy. */
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		/*
		 * Preserve the original's data offset within the mbuf so any
		 * leading room (e.g. for protocol headers) is kept.
		 */
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	/* Copy the remainder of the chain, one mbuf at a time. */
	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			/* Again share clusters by reference. */
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	/* Allocation failed: release the partial copy and report the loss. */
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
79415689Swollman
79515689Swollman/*
7961541Srgrimes * Copy data from an mbuf chain starting "off" bytes from the beginning,
7971541Srgrimes * continuing for "len" bytes, into the indicated buffer.
7981541Srgrimes */
7991549Srgrimesvoid
80081907Sjulianm_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
8011541Srgrimes{
802103569Sbmilekic	u_int count;
8031541Srgrimes
80452201Salfred	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
80552201Salfred	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
8061541Srgrimes	while (off > 0) {
80752201Salfred		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
8081541Srgrimes		if (off < m->m_len)
8091541Srgrimes			break;
8101541Srgrimes		off -= m->m_len;
8111541Srgrimes		m = m->m_next;
8121541Srgrimes	}
8131541Srgrimes	while (len > 0) {
81452201Salfred		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
8151541Srgrimes		count = min(m->m_len - off, len);
8161541Srgrimes		bcopy(mtod(m, caddr_t) + off, cp, count);
8171541Srgrimes		len -= count;
8181541Srgrimes		cp += count;
8191541Srgrimes		off = 0;
8201541Srgrimes		m = m->m_next;
8211541Srgrimes	}
8221541Srgrimes}
8231541Srgrimes
8241541Srgrimes/*
82554002Sarchie * Copy a packet header mbuf chain into a completely new chain, including
82654002Sarchie * copying any mbuf clusters.  Use this instead of m_copypacket() when
82754002Sarchie * you need a writable copy of an mbuf chain.
82854002Sarchie */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			/* Worth a cluster: one allocation yields MCLBYTES. */
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				/* Header consumed part of the mbuf interior. */
				nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			/* Limited by space left in n and data left in m. */
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				/* Source mbuf exhausted; step to the next. */
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    	("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	/* Allocation failed: free whatever was built and report the loss. */
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
89854002Sarchie
89954002Sarchie/*
9001541Srgrimes * Concatenate mbuf chain n to m.
9011541Srgrimes * Both chains must be of the same type (e.g. MT_DATA).
9021541Srgrimes * Any m_pkthdr is not updated.
9031541Srgrimes */
9041549Srgrimesvoid
90572356Sbmilekicm_cat(struct mbuf *m, struct mbuf *n)
9061541Srgrimes{
9071541Srgrimes	while (m->m_next)
9081541Srgrimes		m = m->m_next;
9091541Srgrimes	while (n) {
9101541Srgrimes		if (m->m_flags & M_EXT ||
9111541Srgrimes		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
9121541Srgrimes			/* just join the two chains */
9131541Srgrimes			m->m_next = n;
9141541Srgrimes			return;
9151541Srgrimes		}
9161541Srgrimes		/* splat the data from one into the other */
9171541Srgrimes		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
9181541Srgrimes		    (u_int)n->m_len);
9191541Srgrimes		m->m_len += n->m_len;
9201541Srgrimes		n = m_free(n);
9211541Srgrimes	}
9221541Srgrimes}
9231541Srgrimes
/*
 * Trim req_len bytes from the mbuf chain: from the head when req_len is
 * positive, from the tail when negative.  The pkthdr length (if present
 * on the first mbuf) is adjusted to match.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Entire mbuf consumed; empty it and go on. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* Partial trim: advance the data pointer. */
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			/* (req_len - len) is how much was actually trimmed. */
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* Fast path: the whole trim fits in the last mbuf. */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				/* This mbuf holds the new end of data. */
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
9971541Srgrimes
9981541Srgrimes/*
9991541Srgrimes * Rearange an mbuf chain so that len bytes are contiguous
10001541Srgrimes * and in the data area of an mbuf (so that mtod and dtom
10011541Srgrimes * will work for a structure of size len).  Returns the resulting
10021541Srgrimes * mbuf chain on success, frees it and returns null on failure.
10031541Srgrimes * If there is room, it will add up to max_protohdr-len extra bytes to the
10041541Srgrimes * contiguous region in an attempt to avoid being called next time.
10051541Srgrimes */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			/* Already contiguous; nothing to do. */
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* A plain header mbuf must be able to hold all len bytes. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			/* Transfer the packet header to the new first mbuf. */
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy up to max_protohdr bytes (if room allows) so the
		 * next pullup of a nearby header may be avoided.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			/* Source mbuf drained; free it and continue. */
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain ran out before len bytes were gathered. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
10601541Srgrimes
10611541Srgrimes/*
1062143761Sjmg * Like m_pullup(), except a new mbuf is always allocated, and we allow
1063143761Sjmg * the amount of empty space before the data in the new mbuf to be specified
1064143761Sjmg * (in the event that the caller expects to prepend later).
1065143761Sjmg */
/* Count of m_copyup() failures (allocation failure or len too large). */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	/* The new mbuf must hold dstoff leading bytes plus len data bytes. */
	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR)
		/* Transfer the packet header to the new first mbuf. */
		M_MOVE_PKTHDR(m, n);
	/* Reserve dstoff bytes of empty space before the data. */
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Copy extra (up to max_protohdr) to amortize future calls. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			/* Source mbuf drained; free it and continue. */
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain ran out before len bytes were gathered. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
 bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}
1108143761Sjmg
1109143761Sjmg/*
11101541Srgrimes * Partition an mbuf chain in two pieces, returning the tail --
11111541Srgrimes * all but the first len0 bytes.  In case of failure, it returns NULL and
11121541Srgrimes * attempts to restore the chain to its original state.
111397681Sarchie *
111497681Sarchie * Note that the resulting mbufs might be read-only, because the new
111597681Sarchie * mbuf can end up sharing an mbuf cluster with the original mbuf if
111697681Sarchie * the "breaking point" happens to lie within a cluster mbuf. Use the
111797681Sarchie * M_WRITABLE() macro to check for this case.
11181541Srgrimes */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	/* Find the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	/* Bytes of m that belong to the tail (second) chain. */
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* Tail gets its own packet header describing the remainder. */
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			/* Cluster data can be shared; no copy needed. */
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* Recurse to split m itself; n stays an empty head. */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary; just unlink. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster; tail chain points past the split. */
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
11761541Srgrimes/*
11771541Srgrimes * Routine to copy from device local memory into mbufs.
117878508Sbmilekic * Note that `off' argument is offset into first mbuf of target chain from
117978508Sbmilekic * which to begin copying the data to.
11801541Srgrimes */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	/* The offset must fit within the first (header) mbuf. */
	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				/* Large packet: use a cluster. */
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_DONTWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				/* Out of mbufs mid-chain: discard the lot. */
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			/* Skip 'off' bytes in the first mbuf only. */
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		/* Use the device's copy routine if supplied, else bcopy. */
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
12413352Sphk
12423352Sphk/*
12433352Sphk * Copy data from a buffer back into the indicated mbuf chain,
12443352Sphk * starting "off" bytes from the beginning, extending the mbuf
12453352Sphk * chain if necessary.
12463352Sphk */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;		/* bytes of chain traversed/filled so far */

	if (m0 == NULL)
		return;
	/* Walk to the starting offset, growing the chain if it is short. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			/* Extend with a zeroed mbuf to reach the offset. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Overwrite len bytes, appending mbufs as needed. */
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			/* NB: on allocation failure the copy is truncated. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
129152756Sphk
1292123557Sbms/*
1293138541Ssam * Append the specified data to the indicated mbuf chain,
1294138541Ssam * Extend the mbuf chain if the new data does not fit in
1295138541Ssam * existing space.
1296138541Ssam *
1297138541Ssam * Return 1 if able to complete the job; otherwise 0.
1298138541Ssam */
1299138541Ssamint
1300138541Ssamm_append(struct mbuf *m0, int len, c_caddr_t cp)
1301138541Ssam{
1302138541Ssam	struct mbuf *m, *n;
1303138541Ssam	int remainder, space;
1304138541Ssam
1305138541Ssam	for (m = m0; m->m_next != NULL; m = m->m_next)
1306138541Ssam		;
1307138541Ssam	remainder = len;
1308138541Ssam	space = M_TRAILINGSPACE(m);
1309138541Ssam	if (space > 0) {
1310138541Ssam		/*
1311138541Ssam		 * Copy into available space.
1312138541Ssam		 */
1313138541Ssam		if (space > remainder)
1314138541Ssam			space = remainder;
1315138541Ssam		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1316138541Ssam		m->m_len += space;
1317138541Ssam		cp += space, remainder -= space;
1318138541Ssam	}
1319138541Ssam	while (remainder > 0) {
1320138541Ssam		/*
1321138541Ssam		 * Allocate a new mbuf; could check space
1322138541Ssam		 * and allocate a cluster instead.
1323138541Ssam		 */
1324138541Ssam		n = m_get(M_DONTWAIT, m->m_type);
1325138541Ssam		if (n == NULL)
1326138541Ssam			break;
1327138541Ssam		n->m_len = min(MLEN, remainder);
1328138894Ssam		bcopy(cp, mtod(n, caddr_t), n->m_len);
1329138894Ssam		cp += n->m_len, remainder -= n->m_len;
1330138541Ssam		m->m_next = n;
1331138541Ssam		m = n;
1332138541Ssam	}
1333138541Ssam	if (m0->m_flags & M_PKTHDR)
1334138541Ssam		m0->m_pkthdr.len += len - remainder;
1335138541Ssam	return (remainder == 0);
1336138541Ssam}
1337138541Ssam
1338138541Ssam/*
1339123557Sbms * Apply function f to the data in an mbuf chain starting "off" bytes from
1340123557Sbms * the beginning, continuing for "len" bytes.
1341123557Sbms */
1342123557Sbmsint
1343123557Sbmsm_apply(struct mbuf *m, int off, int len,
1344123564Sbms    int (*f)(void *, void *, u_int), void *arg)
1345123557Sbms{
1346123564Sbms	u_int count;
1347123557Sbms	int rval;
1348123557Sbms
1349123557Sbms	KASSERT(off >= 0, ("m_apply, negative off %d", off));
1350123557Sbms	KASSERT(len >= 0, ("m_apply, negative len %d", len));
1351123557Sbms	while (off > 0) {
1352123557Sbms		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1353123557Sbms		if (off < m->m_len)
1354123557Sbms			break;
1355123557Sbms		off -= m->m_len;
1356123557Sbms		m = m->m_next;
1357123557Sbms	}
1358123557Sbms	while (len > 0) {
1359123557Sbms		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1360123557Sbms		count = min(m->m_len - off, len);
1361123557Sbms		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1362123557Sbms		if (rval)
1363123557Sbms			return (rval);
1364123557Sbms		len -= count;
1365123557Sbms		off = 0;
1366123557Sbms		m = m->m_next;
1367123557Sbms	}
1368123557Sbms	return (0);
1369123557Sbms}
1370123557Sbms
1371123557Sbms/*
1372123557Sbms * Return a pointer to mbuf/offset of location in mbuf chain.
1373123557Sbms */
1374123557Sbmsstruct mbuf *
1375123557Sbmsm_getptr(struct mbuf *m, int loc, int *off)
1376123557Sbms{
1377123557Sbms
1378123557Sbms	while (loc >= 0) {
1379123564Sbms		/* Normal end of search. */
1380123557Sbms		if (m->m_len > loc) {
1381123557Sbms			*off = loc;
1382123557Sbms			return (m);
1383123557Sbms		} else {
1384123557Sbms			loc -= m->m_len;
1385123557Sbms			if (m->m_next == NULL) {
1386123557Sbms				if (loc == 0) {
1387123564Sbms					/* Point at the end of valid data. */
1388123557Sbms					*off = m->m_len;
1389123557Sbms					return (m);
1390123564Sbms				}
1391123564Sbms				return (NULL);
1392123564Sbms			}
1393123564Sbms			m = m->m_next;
1394123557Sbms		}
1395123557Sbms	}
1396123557Sbms	return (NULL);
1397123557Sbms}
1398123557Sbms
/*
 * Debug aid: print each mbuf in the chain (address, length, next pointer,
 * decoded flags) followed by a hex dump of up to maxlen data bytes per
 * mbuf (maxlen == -1 means dump everything).  For packet-header chains
 * the dump stops after m_pkthdr.len bytes and any shortfall is reported.
 */
void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	/* len == -1 flags a non-pkthdr chain: walk until the list ends. */
	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		/* %b decodes m_flags against the bit-name string below. */
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			/* %*D hex-dumps pdata bytes separated by '-'. */
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}
1429103540Sphk
1430103569Sbmilekicu_int
1431103540Sphkm_fixhdr(struct mbuf *m0)
1432103540Sphk{
1433103569Sbmilekic	u_int len;
1434103540Sphk
1435103544Sphk	len = m_length(m0, NULL);
1436103544Sphk	m0->m_pkthdr.len = len;
1437103544Sphk	return (len);
1438103544Sphk}
1439103544Sphk
1440103569Sbmilekicu_int
1441103544Sphkm_length(struct mbuf *m0, struct mbuf **last)
1442103544Sphk{
1443103544Sphk	struct mbuf *m;
1444103569Sbmilekic	u_int len;
1445103544Sphk
1446103544Sphk	len = 0;
1447103544Sphk	for (m = m0; m != NULL; m = m->m_next) {
1448103540Sphk		len += m->m_len;
1449103544Sphk		if (m->m_next == NULL)
1450103544Sphk			break;
1451103540Sphk	}
1452103544Sphk	if (last != NULL)
1453103544Sphk		*last = m;
1454103544Sphk	return (len);
1455103540Sphk}
1456112777Ssilby
1457112777Ssilby/*
1458112777Ssilby * Defragment a mbuf chain, returning the shortest possible
1459112777Ssilby * chain of mbufs and clusters.  If allocation fails and
1460112777Ssilby * this cannot be completed, NULL will be returned, but
1461112777Ssilby * the passed in chain will be unchanged.  Upon success,
1462112777Ssilby * the original chain will be freed, and the new chain
1463112777Ssilby * will be returned.
1464112777Ssilby *
1465112777Ssilby * If a non-packet header is passed in, the original
1466112777Ssilby * mbuf (chain?) will be returned unharmed.
1467112777Ssilby */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	/* Non-packet-header chains are returned untouched. */
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	/* Optionally simulate random allocation failure for testing. */
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	/* First mbuf: cluster if the whole packet won't fit in MHLEN. */
	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	/* Refill: copy the packet in MCLBYTES-sized chunks into new mbufs. */
	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	/* Success: the original chain is consumed and freed. */
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
	/* Failure: free the partial copy; caller's chain is unchanged. */
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
1541119644Ssilby
1542175414Ssam/*
1543175414Ssam * Defragment an mbuf chain, returning at most maxfrags separate
1544175414Ssam * mbufs+clusters.  If this is not possible NULL is returned and
1545175414Ssam * the original mbuf chain is left in it's present (potentially
1546175414Ssam * modified) state.  We use two techniques: collapsing consecutive
1547175414Ssam * mbufs and replacing consecutive mbufs by a cluster.
1548175414Ssam *
1549175414Ssam * NB: this should really be named m_defrag but that name is taken
1550175414Ssam */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		/* Fold n into m when m is writable and has room. */
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		/* Replace two adjacent mbufs whose data fits one cluster. */
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}
1627175414Ssam
1628119644Ssilby#ifdef MBUF_STRESS_TEST
1629119644Ssilby
1630119644Ssilby/*
1631119644Ssilby * Fragment an mbuf chain.  There's no reason you'd ever want to do
1632119644Ssilby * this in normal usage, but it's great for stress testing various
1633119644Ssilby * mbuf consumers.
1634119644Ssilby *
1635119644Ssilby * If fragmentation is not possible, the original chain will be
1636119644Ssilby * returned.
1637119644Ssilby *
1638119644Ssilby * Possible length values:
1639119644Ssilby * 0	 no fragmentation will occur
1640119644Ssilby * > 0	each fragment will be of the specified length
1641119644Ssilby * -1	each fragment will be the same random value in length
1642119644Ssilby * -2	each fragment's length will be entirely random
1643119644Ssilby * (Random values range from 1 to 256)
1644119644Ssilby */
1645119644Ssilbystruct mbuf *
1646119644Ssilbym_fragment(struct mbuf *m0, int how, int length)
1647119644Ssilby{
1648125472Ssilby	struct mbuf *m_new = NULL, *m_final = NULL;
1649125472Ssilby	int progress = 0;
1650119644Ssilby
1651119644Ssilby	if (!(m0->m_flags & M_PKTHDR))
1652119644Ssilby		return (m0);
1653119644Ssilby
1654119644Ssilby	if ((length == 0) || (length < -2))
1655119644Ssilby		return (m0);
1656119644Ssilby
1657119644Ssilby	m_fixhdr(m0); /* Needed sanity check */
1658119644Ssilby
1659119644Ssilby	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1660119644Ssilby
1661119644Ssilby	if (m_final == NULL)
1662119644Ssilby		goto nospace;
1663119644Ssilby
1664123823Ssilby	if (m_dup_pkthdr(m_final, m0, how) == 0)
1665119644Ssilby		goto nospace;
1666119644Ssilby
1667119644Ssilby	m_new = m_final;
1668119644Ssilby
1669119644Ssilby	if (length == -1)
1670119644Ssilby		length = 1 + (arc4random() & 255);
1671119644Ssilby
1672119644Ssilby	while (progress < m0->m_pkthdr.len) {
1673119644Ssilby		int fraglen;
1674119644Ssilby
1675119644Ssilby		if (length > 0)
1676119644Ssilby			fraglen = length;
1677119644Ssilby		else
1678119644Ssilby			fraglen = 1 + (arc4random() & 255);
1679119644Ssilby		if (fraglen > m0->m_pkthdr.len - progress)
1680119644Ssilby			fraglen = m0->m_pkthdr.len - progress;
1681119644Ssilby
1682119644Ssilby		if (fraglen > MCLBYTES)
1683119644Ssilby			fraglen = MCLBYTES;
1684119644Ssilby
1685119644Ssilby		if (m_new == NULL) {
1686119644Ssilby			m_new = m_getcl(how, MT_DATA, 0);
1687119644Ssilby			if (m_new == NULL)
1688119644Ssilby				goto nospace;
1689119644Ssilby		}
1690119644Ssilby
1691119644Ssilby		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1692119644Ssilby		progress += fraglen;
1693119644Ssilby		m_new->m_len = fraglen;
1694119644Ssilby		if (m_new != m_final)
1695119644Ssilby			m_cat(m_final, m_new);
1696119644Ssilby		m_new = NULL;
1697119644Ssilby	}
1698119644Ssilby	m_freem(m0);
1699119644Ssilby	m0 = m_final;
1700119644Ssilby	return (m0);
1701119644Ssilbynospace:
1702119644Ssilby	if (m_final)
1703119644Ssilby		m_freem(m_final);
1704119644Ssilby	/* Return the original chain on failure */
1705119644Ssilby	return (m0);
1706119644Ssilby}
1707119644Ssilby
1708119644Ssilby#endif
1709125296Ssilby
1710163915Sandre/*
1711163915Sandre * Copy the contents of uio into a properly sized mbuf chain.
1712163915Sandre */
1713125296Ssilbystruct mbuf *
1714163915Sandrem_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1715125296Ssilby{
1716163915Sandre	struct mbuf *m, *mb;
1717163915Sandre	int error, length, total;
1718163915Sandre	int progress = 0;
1719125296Ssilby
1720163915Sandre	/*
1721163915Sandre	 * len can be zero or an arbitrary large value bound by
1722163915Sandre	 * the total data supplied by the uio.
1723163915Sandre	 */
1724125296Ssilby	if (len > 0)
1725125296Ssilby		total = min(uio->uio_resid, len);
1726125296Ssilby	else
1727125296Ssilby		total = uio->uio_resid;
1728163915Sandre
1729163915Sandre	/*
1730163915Sandre	 * The smallest unit returned by m_getm2() is a single mbuf
1731163915Sandre	 * with pkthdr.  We can't align past it.  Align align itself.
1732163915Sandre	 */
1733163915Sandre	if (align)
1734163915Sandre		align &= ~(sizeof(long) - 1);
1735145883Semax	if (align >= MHLEN)
1736163915Sandre		return (NULL);
1737163915Sandre
1738166171Sandre	/*
1739166171Sandre	 * Give us the full allocation or nothing.
1740166171Sandre	 * If len is zero return the smallest empty mbuf.
1741166171Sandre	 */
1742166171Sandre	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1743163915Sandre	if (m == NULL)
1744163915Sandre		return (NULL);
1745163915Sandre	m->m_data += align;
1746163915Sandre
1747163915Sandre	/* Fill all mbufs with uio data and update header information. */
1748163915Sandre	for (mb = m; mb != NULL; mb = mb->m_next) {
1749163915Sandre		length = min(M_TRAILINGSPACE(mb), total - progress);
1750163915Sandre
1751163915Sandre		error = uiomove(mtod(mb, void *), length, uio);
1752163915Sandre		if (error) {
1753163915Sandre			m_freem(m);
1754163915Sandre			return (NULL);
1755125296Ssilby		}
1756163915Sandre
1757163915Sandre		mb->m_len = length;
1758125296Ssilby		progress += length;
1759163915Sandre		if (flags & M_PKTHDR)
1760163915Sandre			m->m_pkthdr.len += length;
1761125296Ssilby	}
1762163915Sandre	KASSERT(progress == total, ("%s: progress != total", __func__));
1763163915Sandre
1764163915Sandre	return (m);
1765125296Ssilby}
1766148552Ssam
1767148552Ssam/*
1768148552Ssam * Set the m_data pointer of a newly-allocated mbuf
1769148552Ssam * to place an object of the specified size at the
1770148552Ssam * end of the mbuf, longword aligned.
1771148552Ssam */
1772148552Ssamvoid
1773148552Ssamm_align(struct mbuf *m, int len)
1774148552Ssam{
1775148552Ssam	int adjust;
1776148552Ssam
1777148552Ssam	if (m->m_flags & M_EXT)
1778148552Ssam		adjust = m->m_ext.ext_size - len;
1779148552Ssam	else if (m->m_flags & M_PKTHDR)
1780148552Ssam		adjust = MHLEN - len;
1781148552Ssam	else
1782148552Ssam		adjust = MLEN - len;
1783148552Ssam	m->m_data += adjust &~ (sizeof(long)-1);
1784148552Ssam}
1785156756Ssam
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 *
 * Returns the (possibly re-headed) chain, or NULL on allocation
 * failure, in which case the entire original chain is freed.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, how, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			/* Copy up to one cluster's worth per new mbuf. */
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			/* More than MCLBYTES remained: grab another cluster. */
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		/* Splice the copy (mfirst..n) in place of the old mbuf m. */
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
1938