uipc_mbuf.c revision 55171
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 55171 1999-12-28 06:35:57Z msmith $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
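/*
 * Counts of threads sleeping in m_mballoc_wait()/m_clalloc_wait(),
 * waiting for a freed mbuf or mbuf cluster; they are woken up via
 * MMBWAKEUP() and MCLWAKEUP() once something becomes available again.
 */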
u_int	m_mballoc_wid = 0;
u_int	m_clalloc_wid = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating (or trying to
	 * allocate) from mb_map, in order to avoid dipping into the
	 * section of mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map)
	 * -- however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs (when
	 * we're calling with M_WAIT the sleep procedure will be woken
	 * up when an mbuf is freed. See m_mballoc_wait()).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;

	/*
	 * Now that we (think) that we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait()
	 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
	 *      this way, purposely, in the [unlikely] case that an mbuf was
	 *      freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, M_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, M_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

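/*
 * When MCLBYTES > PAGE_SIZE, clusters are carved out of mb_map with
 * contigmalloc1(), which may sleep and so cannot serve M_DONTWAIT
 * requests directly.  Such requests are instead counted in i_want_my_mcl
 * and serviced by the kernel process below.
 */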
static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;
	/*
	 * If we've hit the mcluster number limit, stop allocating (or
	 * trying to allocate) from mb_map, in order to avoid dipping
	 * into the section of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters) {
		mbstat.m_drops++;
		return (0);
	}

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map). From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

#ifdef __i386__
	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
#endif

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) that we've got something, we will redo an
	 * MCLALLOC, but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = NULL;
	MCLALLOC(p, M_DONTWAIT);

	s = splimp();
	if (p != NULL) {	/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}

	splx(s);
	return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
#ifdef __i386__
		KASSERT(intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
		m_reclaim();
	}

	/*
	 * Both m_mballoc_wait and m_retry must be nulled because
	 * when the MGET macro is run from here, we definitely do _not_
	 * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
	 */
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait

	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;

	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
#ifdef __i386__
		KASSERT(intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
		m_reclaim();
	}

#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait

	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;

	return (m);
}

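/*
 * Ask every protocol that has registered a drain routine to release
 * whatever mbufs it can spare.
 */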
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

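/*
 * Free a single mbuf and return its successor, if any.
 */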
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

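/*
 * Free an entire mbuf chain.
 */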
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if(!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if(!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if(!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    	("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

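/*
 * Trim req_len bytes of data from the mbuf chain: from the head if
 * req_len is positive, from the tail if it is negative.  Any packet
 * header length is adjusted to match.
 */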
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next) ->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if(!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

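/*
 * Debugging aid: dump each mbuf in the chain (its address and data
 * bytes) to the console.
 */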
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}