uipc_mbuf.c revision 54906
11541Srgrimes/*
21541Srgrimes * Copyright (c) 1982, 1986, 1988, 1991, 1993
31541Srgrimes *	The Regents of the University of California.  All rights reserved.
41541Srgrimes *
51541Srgrimes * Redistribution and use in source and binary forms, with or without
61541Srgrimes * modification, are permitted provided that the following conditions
71541Srgrimes * are met:
81541Srgrimes * 1. Redistributions of source code must retain the above copyright
91541Srgrimes *    notice, this list of conditions and the following disclaimer.
101541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
111541Srgrimes *    notice, this list of conditions and the following disclaimer in the
121541Srgrimes *    documentation and/or other materials provided with the distribution.
131541Srgrimes * 3. All advertising materials mentioning features or use of this software
141541Srgrimes *    must display the following acknowledgement:
151541Srgrimes *	This product includes software developed by the University of
161541Srgrimes *	California, Berkeley and its contributors.
171541Srgrimes * 4. Neither the name of the University nor the names of its contributors
181541Srgrimes *    may be used to endorse or promote products derived from this software
191541Srgrimes *    without specific prior written permission.
201541Srgrimes *
211541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
221541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
231541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
241541Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
251541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
261541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
271541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
281541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
291541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
301541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
311541Srgrimes * SUCH DAMAGE.
321541Srgrimes *
331541Srgrimes *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
3450477Speter * $FreeBSD: head/sys/kern/uipc_mbuf.c 54906 1999-12-20 18:10:00Z eivind $
351541Srgrimes */
361541Srgrimes
3748579Smsmith#include "opt_param.h"
381541Srgrimes#include <sys/param.h>
391541Srgrimes#include <sys/systm.h>
4032036Sbde#include <sys/malloc.h>
411541Srgrimes#include <sys/mbuf.h>
421541Srgrimes#include <sys/kernel.h>
4323081Swollman#include <sys/sysctl.h>
441541Srgrimes#include <sys/domain.h>
451541Srgrimes#include <sys/protosw.h>
461541Srgrimes
471541Srgrimes#include <vm/vm.h>
489759Sbde#include <vm/vm_kern.h>
4912662Sdg#include <vm/vm_extern.h>
501541Srgrimes
5154478Sgreen#ifdef INVARIANTS
5254478Sgreen#include <machine/cpu.h>
5354478Sgreen#endif
5454478Sgreen
/* Forward declaration for the SYSINIT entry just below. */
5510653Sdgstatic void mbinit __P((void *));
5610358SjulianSYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
5710358Sjulian
/* presumably the base of the mbuf arena and per-cluster refcounts -- confirm in mbuf.h */
589759Sbdestruct mbuf *mbutl;
591541Srgrimeschar	*mclrefcnt;
/* Subsystem statistics, exported through the kern.ipc.mbstat sysctl below. */
609759Sbdestruct mbstat mbstat;
/* Singly-linked free lists of mbufs and of mbuf clusters. */
6115689Swollmanstruct mbuf *mmbfree;
629759Sbdeunion mcluster *mclfree;
/* Header-size parameters, adjustable via the kern.ipc sysctls below. */
639759Sbdeint	max_linkhdr;
649759Sbdeint	max_protohdr;
659759Sbdeint	max_hdr;
669759Sbdeint	max_datalen;
/* Boot-time pool limits, seeded from the kern.ipc.* tunables below. */
6748579Smsmithint	nmbclusters;
6848579Smsmithint	nmbufs;
/* Number of sleepers in m_mballoc_wait() / m_clalloc_wait(). */
6954584Sgreenu_int	m_mballoc_wid = 0;
7054584Sgreenu_int	m_clalloc_wid = 0;
711541Srgrimes
7244078SdfrSYSCTL_DECL(_kern_ipc);
7323081SwollmanSYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
7423081Swollman	   &max_linkhdr, 0, "");
7523081SwollmanSYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
7623081Swollman	   &max_protohdr, 0, "");
7723081SwollmanSYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
7823081SwollmanSYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
7923081Swollman	   &max_datalen, 0, "");
8054478SgreenSYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
8154478Sgreen	   &mbuf_wait, 0, "");
8223081SwollmanSYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
/* Read-only: the cluster limit is fixed at boot (kern.ipc.nmbclusters tunable). */
8348579SmsmithSYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
8448579Smsmith	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
8548579Smsmith#ifndef NMBCLUSTERS
8648579Smsmith#define NMBCLUSTERS	(512 + MAXUSERS * 16)
8748579Smsmith#endif
8848579SmsmithTUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
8948579SmsmithTUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);	/* XXX fixup? */
9023081Swollman
9112819Sphkstatic void	m_reclaim __P((void));
9212819Sphk
9315736Sphk/* "number of clusters of pages" */
9415736Sphk#define NCL_INIT	1
9515736Sphk
9615744Sphk#define NMB_INIT	16
9715744Sphk
9810358Sjulian/* ARGSUSED*/
/*
 * Boot-time initialization of the mbuf subsystem (run via SYSINIT).
 * Resets the free lists, publishes the compile-time size constants in
 * mbstat, and pre-allocates an initial pool of mbufs and clusters.
 * Panics on failure: the network stack cannot run without this pool.
 */
9910358Sjulianstatic void
10012569Sbdembinit(dummy)
10112569Sbde	void *dummy;
1021541Srgrimes{
1031541Srgrimes	int s;
1041541Srgrimes
10515689Swollman	mmbfree = NULL; mclfree = NULL;
	/* Record the compile-time sizes for the kern.ipc.mbstat sysctl. */
10623081Swollman	mbstat.m_msize = MSIZE;
10723081Swollman	mbstat.m_mclbytes = MCLBYTES;
10823081Swollman	mbstat.m_minclsize = MINCLSIZE;
10923081Swollman	mbstat.m_mlen = MLEN;
11023081Swollman	mbstat.m_mhlen = MHLEN;
11123081Swollman
	/* m_mballoc()/m_clalloc() must be called at splimp. */
1121541Srgrimes	s = splimp();
11315689Swollman	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
11415689Swollman		goto bad;
11522671Swollman#if MCLBYTES <= PAGE_SIZE
1161541Srgrimes	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
1171541Srgrimes		goto bad;
11822671Swollman#else
11922671Swollman	/* It's OK to call contigmalloc in this context. */
12032036Sbde	if (m_clalloc(16, M_WAIT) == 0)
12122671Swollman		goto bad;
12222671Swollman#endif
1231541Srgrimes	splx(s);
1241541Srgrimes	return;
1251541Srgrimesbad:
1261541Srgrimes	panic("mbinit");
1271541Srgrimes}
1281541Srgrimes
1291541Srgrimes/*
13015689Swollman * Allocate at least nmb mbufs and place on mbuf free list.
13115689Swollman * Must be called at splimp.
13215689Swollman */
13315689Swollman/* ARGSUSED */
/*
 * m_mballoc(): carve at least `nmb' new mbufs (rounded up to whole
 * pages) out of mb_map and prepend them to the mmbfree list.
 * `how' is M_WAIT or M_DONTWAIT.  Returns 1 on success, 0 if the map
 * is exhausted or nothing was immediately available.
 * Must be called at splimp (see block comment above).
 */
13415689Swollmanint
13532036Sbdem_mballoc(nmb, how)
13615689Swollman	register int nmb;
13732036Sbde	int how;
13815689Swollman{
13915689Swollman	register caddr_t p;
14015689Swollman	register int i;
14115689Swollman	int nbytes;
14215689Swollman
14354478Sgreen	/*
14454478Sgreen	 * Once we run out of map space, it will be impossible to get
14554478Sgreen	 * any more (nothing is ever freed back to the map)
14654478Sgreen	 * -- however you are not dead as m_reclaim might
14754478Sgreen	 * still be able to free a substantial amount of space.
14854478Sgreen	 *
14954478Sgreen	 * XXX Furthermore, we can also work with "recycled" mbufs (when
15054478Sgreen	 * we're calling with M_WAIT the sleep procedure will be woken
15154478Sgreen	 * up when an mbuf is freed. See m_mballoc_wait()).
15215689Swollman	 */
15315689Swollman	if (mb_map_full)
15415689Swollman		return (0);
15515689Swollman
15615689Swollman	nbytes = round_page(nmb * MSIZE);
	/* Try without sleeping first; only block if the caller said M_WAIT. */
15722899Swollman	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
15832036Sbde	if (p == 0 && how == M_WAIT) {
15922899Swollman		mbstat.m_wait++;
16022899Swollman		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
16122899Swollman	}
16222899Swollman
16315689Swollman	/*
16432036Sbde	 * Either the map is now full, or `how' is M_NOWAIT and there
16515689Swollman	 * are no pages left.
16615689Swollman	 */
16715689Swollman	if (p == NULL)
16815689Swollman		return (0);
16915689Swollman
	/* Page rounding may have given us more mbufs than requested. */
17015689Swollman	nmb = nbytes / MSIZE;
17115689Swollman	for (i = 0; i < nmb; i++) {
17215689Swollman		((struct mbuf *)p)->m_next = mmbfree;
17315689Swollman		mmbfree = (struct mbuf *)p;
17415689Swollman		p += MSIZE;
17515689Swollman	}
17615689Swollman	mbstat.m_mbufs += nmb;
17715689Swollman	return (1);
17815689Swollman}
17915689Swollman
18054478Sgreen/*
18154478Sgreen * Once the mb_map has been exhausted and if the call to the allocation macros
18254478Sgreen * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
18354478Sgreen * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
18454478Sgreen * designated (mbuf_wait) time.
18554478Sgreen */
/*
 * m_mballoc_wait(): last-resort allocation once mb_map is exhausted.
 * Sleep (up to mbuf_wait ticks) for an mbuf to be freed, then redo
 * the MGET/MGETHDR with M_DONTWAIT to pick up any recycled mbuf.
 * `caller' selects which macro to redo (MGET_C or MGETHDR_C); `type'
 * is the mbuf type.  Returns the mbuf, or NULL if none showed up.
 *
 * NOTE(review): m_mballoc_wid is decremented here only on timeout;
 * presumably the wakeup side (MMBWAKEUP()) accounts for successful
 * wakeups -- confirm against the macro definitions in mbuf.h.
 */
18654478Sgreenstruct mbuf *
18754478Sgreenm_mballoc_wait(int caller, int type)
18854478Sgreen{
18954478Sgreen	struct mbuf *p;
19054478Sgreen	int s;
19154478Sgreen
19254478Sgreen	m_mballoc_wid++;
19354478Sgreen	if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
19454478Sgreen		m_mballoc_wid--;
19554478Sgreen
19654478Sgreen	/*
19754478Sgreen	 * Now that we (think) that we've got something, we will redo an
19854478Sgreen	 * MGET, but avoid getting into another instance of m_mballoc_wait()
19954478Sgreen	 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
20054478Sgreen	 *      this way, purposely, in the [unlikely] case that an mbuf was
20154478Sgreen	 *      freed but the sleep was not awakened in time.
20254478Sgreen	 */
20354478Sgreen	p = NULL;
20454478Sgreen	switch (caller) {
20554478Sgreen	case MGET_C:
20654478Sgreen		MGET(p, M_DONTWAIT, type);
20754478Sgreen		break;
20854478Sgreen	case MGETHDR_C:
20954478Sgreen		MGETHDR(p, M_DONTWAIT, type);
21054478Sgreen		break;
21154478Sgreen	default:
21254478Sgreen		panic("m_mballoc_wait: invalid caller (%d)", caller);
21354478Sgreen	}
21454478Sgreen
21554478Sgreen	s = splimp();
21654478Sgreen	if (p != NULL) {		/* We waited and got something... */
21754478Sgreen		mbstat.m_wait++;
21854478Sgreen		/* Wake up another if we have more free. */
21954478Sgreen		if (mmbfree != NULL)
22054787Sgreen			MMBWAKEUP();
22154478Sgreen	}
22254478Sgreen	splx(s);
22354478Sgreen	return (p);
22454478Sgreen}
22554478Sgreen
22622671Swollman#if MCLBYTES > PAGE_SIZE
/*
 * Number of clusters still owed to non-blocking m_clalloc() callers;
 * the mclalloc kernel thread sleeps on this address and works it down.
 */
22722899Swollmanstatic int i_want_my_mcl;
22822671Swollman
22922899Swollmanstatic void
23022671Swollmankproc_mclalloc(void)
23122671Swollman{
23222671Swollman	int status;
23322671Swollman
23422671Swollman	while (1) {
23522671Swollman		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);
23622671Swollman
23722671Swollman		for (; i_want_my_mcl; i_want_my_mcl--) {
23832036Sbde			if (m_clalloc(1, M_WAIT) == 0)
23922671Swollman				printf("m_clalloc failed even in process context!\n");
24022671Swollman		}
24122671Swollman	}
24222671Swollman}
24322671Swollman
/* Kernel-thread descriptor: run kproc_mclalloc() in process context. */
24422671Swollmanstatic struct proc *mclallocproc;
24522671Swollmanstatic struct kproc_desc mclalloc_kp = {
24622671Swollman	"mclalloc",
24722671Swollman	kproc_mclalloc,
24822671Swollman	&mclallocproc
24922671Swollman};
/* Start the thread once kernel threads can be created. */
25048391SpeterSYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
25122671Swollman	   &mclalloc_kp);
25222671Swollman#endif
25322671Swollman
25415689Swollman/*
2551541Srgrimes * Allocate some number of mbuf clusters
2561541Srgrimes * and place on cluster free list.
2571541Srgrimes * Must be called at splimp.
2581541Srgrimes */
2591541Srgrimes/* ARGSUSED */
/*
 * m_clalloc(): allocate `ncl' clusters worth of wired pages from
 * mb_map and thread them onto the mclfree list.  `how' is M_WAIT or
 * M_DONTWAIT.  Returns 1 on success, 0 on failure (map full, or
 * nothing available without sleeping).  Must be called at splimp.
 * When MCLBYTES > PAGE_SIZE a non-blocking request is handed off to
 * the mclalloc kernel thread instead of being serviced here.
 */
2601549Srgrimesint
26132036Sbdem_clalloc(ncl, how)
2621541Srgrimes	register int ncl;
26332036Sbde	int how;
2641541Srgrimes{
2651541Srgrimes	register caddr_t p;
2661541Srgrimes	register int i;
2671541Srgrimes	int npg;
2681541Srgrimes
2697066Sdg	/*
2707066Sdg	 * Once we run out of map space, it will be impossible
2717066Sdg	 * to get any more (nothing is ever freed back to the
27254478Sgreen	 * map). From this point on, we solely rely on freed
27354478Sgreen	 * mclusters.
2747066Sdg	 */
27522899Swollman	if (mb_map_full) {
27622899Swollman		mbstat.m_drops++;
2777066Sdg		return (0);
27822899Swollman	}
2797066Sdg
28022671Swollman#if MCLBYTES > PAGE_SIZE
	/* Can't sleep here: queue the request for the mclalloc thread. */
28132036Sbde	if (how != M_WAIT) {
28222671Swollman		i_want_my_mcl += ncl;
28322671Swollman		wakeup(&i_want_my_mcl);
28422899Swollman		mbstat.m_wait++;
28522671Swollman		p = 0;
28622671Swollman	} else {
28722671Swollman		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
28822671Swollman				  ~0ul, PAGE_SIZE, 0, mb_map);
28922671Swollman	}
29022671Swollman#else
29115543Sphk	npg = ncl;
29221737Sdg	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
29332036Sbde				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	/* Recompute ncl: number of whole clusters the pages actually hold. */
29422671Swollman	ncl = ncl * PAGE_SIZE / MCLBYTES;
29522671Swollman#endif
2967066Sdg	/*
29732036Sbde	 * Either the map is now full, or `how' is M_NOWAIT and there
2987066Sdg	 * are no pages left.
2997066Sdg	 */
30022899Swollman	if (p == NULL) {
30122899Swollman		mbstat.m_drops++;
3021541Srgrimes		return (0);
30322899Swollman	}
3047066Sdg
3051541Srgrimes	for (i = 0; i < ncl; i++) {
3061541Srgrimes		((union mcluster *)p)->mcl_next = mclfree;
3071541Srgrimes		mclfree = (union mcluster *)p;
3081541Srgrimes		p += MCLBYTES;
3091541Srgrimes		mbstat.m_clfree++;
3101541Srgrimes	}
3111541Srgrimes	mbstat.m_clusters += ncl;
3121541Srgrimes	return (1);
3131541Srgrimes}
3141541Srgrimes
3151541Srgrimes/*
31654478Sgreen * Once the mb_map submap has been exhausted and the allocation is called with
31754478Sgreen * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
31854478Sgreen * sleep for a designated amount of time (mbuf_wait) or until we're woken up
31954478Sgreen * due to sudden mcluster availability.
32054478Sgreen */
/*
 * m_clalloc_wait(): last-resort cluster allocation once mb_map is
 * exhausted.  Sleep (up to mbuf_wait ticks) for a cluster to be
 * freed, then retry MCLALLOC without sleeping.  Returns the cluster
 * or NULL.  Must not be called from interrupt context.
 */
32154478Sgreencaddr_t
32254478Sgreenm_clalloc_wait(void)
32354478Sgreen{
32454478Sgreen	caddr_t p;
32554478Sgreen	int s;
32654478Sgreen
32754478Sgreen#ifdef __i386__
32854478Sgreen	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
32954478Sgreen	KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
33054478Sgreen#endif
33154478Sgreen
33254478Sgreen	/* Sleep until something's available or until we expire. */
33354478Sgreen	m_clalloc_wid++;
33454478Sgreen	if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
33554478Sgreen		m_clalloc_wid--;
33654478Sgreen
33754478Sgreen	/*
33854478Sgreen	 * Now that we (think) that we've got something, redo the
33954478Sgreen	 * MCLALLOC, but avoid getting into another instance of m_clalloc_wait()
34054478Sgreen	 */
34154478Sgreen	p = NULL;
34254478Sgreen	MCLALLOC(p, M_DONTWAIT);
34354478Sgreen
34454478Sgreen	s = splimp();
34554478Sgreen	if (p != NULL) {	/* We waited and got something... */
34654478Sgreen		mbstat.m_wait++;
34754478Sgreen		/* Wake up another if we have more free. */
34854478Sgreen		if (mclfree != NULL)
34954787Sgreen			MCLWAKEUP();
35054478Sgreen	}
35154478Sgreen
35254478Sgreen	splx(s);
35354478Sgreen	return (p);
35454478Sgreen}
35554478Sgreen
35654478Sgreen/*
35745615Sdes * When MGET fails, ask protocols to free space when short of memory,
3581541Srgrimes * then re-attempt to allocate an mbuf.
3591541Srgrimes */
/*
 * m_retry(): slow path for MGET.  `i' is the wait flag (M_WAIT or
 * M_DONTWAIT), `t' the mbuf type.  With M_WAIT we first ask the
 * protocols to release space (m_reclaim() -- never legal in interrupt
 * context) and then redo the MGET with m_retry/m_mballoc_wait
 * compiled out so the macro cannot recurse back into us.
 */
3601541Srgrimesstruct mbuf *
3611541Srgrimesm_retry(i, t)
3621541Srgrimes	int i, t;
3631541Srgrimes{
3641541Srgrimes	register struct mbuf *m;
3651541Srgrimes
36637878Sdg	/*
36737878Sdg	 * Must only do the reclaim if not in an interrupt context.
36837878Sdg	 */
36954478Sgreen	if (i == M_WAIT) {
37054478Sgreen#ifdef __i386__
37154478Sgreen		KASSERT(intr_nesting_level == 0,
37254478Sgreen		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
37354478Sgreen#endif
37437878Sdg		m_reclaim();
37554478Sgreen	}
37654478Sgreen
37754478Sgreen	/*
37854478Sgreen	 * Both m_mballoc_wait and m_retry must be nulled because
37954478Sgreen	 * when the MGET macro is run from here, we definitely do _not_
38054478Sgreen	 * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
38154478Sgreen	 */
38254478Sgreen#define m_mballoc_wait(caller,type)    (struct mbuf *)0
3831541Srgrimes#define m_retry(i, t)	(struct mbuf *)0
3841541Srgrimes	MGET(m, i, t);
3851541Srgrimes#undef m_retry
38654478Sgreen#undef m_mballoc_wait
38754478Sgreen
38854478Sgreen	if (m != NULL)
3896669Sdg		mbstat.m_wait++;
39054478Sgreen	else
39154478Sgreen		mbstat.m_drops++;
39254478Sgreen
3931541Srgrimes	return (m);
3941541Srgrimes}
3951541Srgrimes
3961541Srgrimes/*
3971541Srgrimes * As above; retry an MGETHDR.
3981541Srgrimes */
/*
 * m_retryhdr(): same as m_retry() but redoes MGETHDR -- the slow path
 * for packet-header mbuf allocation.
 */
3991541Srgrimesstruct mbuf *
4001541Srgrimesm_retryhdr(i, t)
4011541Srgrimes	int i, t;
4021541Srgrimes{
4031541Srgrimes	register struct mbuf *m;
4041541Srgrimes
40537878Sdg	/*
40637878Sdg	 * Must only do the reclaim if not in an interrupt context.
40737878Sdg	 */
40854478Sgreen	if (i == M_WAIT) {
40954478Sgreen#ifdef __i386__
41054478Sgreen		KASSERT(intr_nesting_level == 0,
41154478Sgreen		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
41254478Sgreen#endif
41337878Sdg		m_reclaim();
41454478Sgreen	}
41554478Sgreen
	/* Null the helpers so MGETHDR cannot recurse into us (see m_retry). */
41654478Sgreen#define m_mballoc_wait(caller,type)    (struct mbuf *)0
4171541Srgrimes#define m_retryhdr(i, t) (struct mbuf *)0
4181541Srgrimes	MGETHDR(m, i, t);
4191541Srgrimes#undef m_retryhdr
42054478Sgreen#undef m_mballoc_wait
42154478Sgreen
42254478Sgreen	if (m != NULL)
4236669Sdg		mbstat.m_wait++;
42454478Sgreen	else
42554478Sgreen		mbstat.m_drops++;
42654478Sgreen
4271541Srgrimes	return (m);
4281541Srgrimes}
4291541Srgrimes
43012819Sphkstatic void
4311541Srgrimesm_reclaim()
4321541Srgrimes{
4331541Srgrimes	register struct domain *dp;
4341541Srgrimes	register struct protosw *pr;
4351541Srgrimes	int s = splimp();
4361541Srgrimes
4371541Srgrimes	for (dp = domains; dp; dp = dp->dom_next)
4381541Srgrimes		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
4391541Srgrimes			if (pr->pr_drain)
4401541Srgrimes				(*pr->pr_drain)();
4411541Srgrimes	splx(s);
4421541Srgrimes	mbstat.m_drain++;
4431541Srgrimes}
4441541Srgrimes
4451541Srgrimes/*
4461541Srgrimes * Space allocation routines.
4471541Srgrimes * These are also available as macros
4481541Srgrimes * for critical paths.
4491541Srgrimes */
/*
 * Function form of the MGET() macro: allocate one mbuf of the given
 * type.  `how' is M_WAIT or M_DONTWAIT; may return NULL on failure.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *mb;

	MGET(mb, how, type);
	return (mb);
}
4591541Srgrimes
/*
 * Function form of the MGETHDR() macro: allocate one packet-header
 * mbuf.  `how' is M_WAIT or M_DONTWAIT; may return NULL on failure.
 */
struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *mb;

	MGETHDR(mb, how, type);
	return (mb);
}
4691541Srgrimes
4701541Srgrimesstruct mbuf *
47132036Sbdem_getclr(how, type)
47232036Sbde	int how, type;
4731541Srgrimes{
4741541Srgrimes	register struct mbuf *m;
4751541Srgrimes
47632036Sbde	MGET(m, how, type);
4771541Srgrimes	if (m == 0)
4781541Srgrimes		return (0);
4791541Srgrimes	bzero(mtod(m, caddr_t), MLEN);
4801541Srgrimes	return (m);
4811541Srgrimes}
4821541Srgrimes
/*
 * Function form of the MFREE() macro: free a single mbuf and return
 * its successor in the chain (may be NULL).
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *nxt;

	MFREE(m, nxt);
	return (nxt);
}
4921541Srgrimes
4931541Srgrimesvoid
4941541Srgrimesm_freem(m)
4951541Srgrimes	register struct mbuf *m;
4961541Srgrimes{
4971541Srgrimes	register struct mbuf *n;
4981541Srgrimes
4991541Srgrimes	if (m == NULL)
5001541Srgrimes		return;
5011541Srgrimes	do {
5021541Srgrimes		MFREE(m, n);
5033308Sphk		m = n;
5043308Sphk	} while (m);
5051541Srgrimes}
5061541Srgrimes
5071541Srgrimes/*
5081541Srgrimes * Mbuffer utility routines.
5091541Srgrimes */
5101541Srgrimes
5111541Srgrimes/*
5121541Srgrimes * Lesser-used path for M_PREPEND:
5131541Srgrimes * allocate new mbuf to prepend to chain,
5141541Srgrimes * copy junk along.
5151541Srgrimes */
5161541Srgrimesstruct mbuf *
5171541Srgrimesm_prepend(m, len, how)
5181541Srgrimes	register struct mbuf *m;
5191541Srgrimes	int len, how;
5201541Srgrimes{
5211541Srgrimes	struct mbuf *mn;
5221541Srgrimes
5231541Srgrimes	MGET(mn, how, m->m_type);
5241541Srgrimes	if (mn == (struct mbuf *)NULL) {
5251541Srgrimes		m_freem(m);
5261541Srgrimes		return ((struct mbuf *)NULL);
5271541Srgrimes	}
5281541Srgrimes	if (m->m_flags & M_PKTHDR) {
5291541Srgrimes		M_COPY_PKTHDR(mn, m);
5301541Srgrimes		m->m_flags &= ~M_PKTHDR;
5311541Srgrimes	}
5321541Srgrimes	mn->m_next = m;
5331541Srgrimes	m = mn;
5341541Srgrimes	if (len < MHLEN)
5351541Srgrimes		MH_ALIGN(m, len);
5361541Srgrimes	m->m_len = len;
5371541Srgrimes	return (m);
5381541Srgrimes}
5391541Srgrimes
5401541Srgrimes/*
5411541Srgrimes * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
5421541Srgrimes * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
5431541Srgrimes * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
54454002Sarchie * Note that the copy is read-only, because clusters are not copied,
54554002Sarchie * only their reference counts are incremented.
5461541Srgrimes */
54723081Swollman#define MCFail (mbstat.m_mcfail)
5481541Srgrimes
/*
 * m_copym(): make a read-only copy of the chain starting `off0' bytes
 * in, for `len' bytes (len == M_COPYALL copies to end of chain).
 * Cluster (M_EXT) data is shared by bumping the reference count or
 * calling ext_ref -- it is not duplicated, hence read-only.
 * `wait' is M_WAIT or M_DONTWAIT.  Returns NULL on failure.
 */
5491541Srgrimesstruct mbuf *
5501541Srgrimesm_copym(m, off0, len, wait)
5511541Srgrimes	register struct mbuf *m;
5521541Srgrimes	int off0, wait;
5531541Srgrimes	register int len;
5541541Srgrimes{
5551541Srgrimes	register struct mbuf *n, **np;
5561541Srgrimes	register int off = off0;
5571541Srgrimes	struct mbuf *top;
5581541Srgrimes	int copyhdr = 0;
5591541Srgrimes
56052201Salfred	KASSERT(off >= 0, ("m_copym, negative off %d", off));
56152201Salfred	KASSERT(len >= 0, ("m_copym, negative len %d", len));
5621541Srgrimes	if (off == 0 && m->m_flags & M_PKTHDR)
5631541Srgrimes		copyhdr = 1;
	/* Skip over the first `off' bytes of the source chain. */
5641541Srgrimes	while (off > 0) {
56552201Salfred		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
5661541Srgrimes		if (off < m->m_len)
5671541Srgrimes			break;
5681541Srgrimes		off -= m->m_len;
5691541Srgrimes		m = m->m_next;
5701541Srgrimes	}
5711541Srgrimes	np = &top;
5721541Srgrimes	top = 0;
5731541Srgrimes	while (len > 0) {
5741541Srgrimes		if (m == 0) {
57552201Salfred			KASSERT(len == M_COPYALL,
57652201Salfred			    ("m_copym, length > size of mbuf chain"));
5771541Srgrimes			break;
5781541Srgrimes		}
5791541Srgrimes		MGET(n, wait, m->m_type);
5801541Srgrimes		*np = n;
5811541Srgrimes		if (n == 0)
5821541Srgrimes			goto nospace;
5831541Srgrimes		if (copyhdr) {
5841541Srgrimes			M_COPY_PKTHDR(n, m);
5851541Srgrimes			if (len == M_COPYALL)
5861541Srgrimes				n->m_pkthdr.len -= off0;
5871541Srgrimes			else
5881541Srgrimes				n->m_pkthdr.len = len;
5891541Srgrimes			copyhdr = 0;
5901541Srgrimes		}
5911541Srgrimes		n->m_len = min(len, m->m_len - off);
5921541Srgrimes		if (m->m_flags & M_EXT) {
			/* Share the external buffer: add a reference, don't copy. */
5931541Srgrimes			n->m_data = m->m_data + off;
59417663Sjulian			if(!m->m_ext.ext_ref)
59517663Sjulian				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
59617663Sjulian			else
59717663Sjulian				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
59817663Sjulian							m->m_ext.ext_size);
5991541Srgrimes			n->m_ext = m->m_ext;
6001541Srgrimes			n->m_flags |= M_EXT;
6011541Srgrimes		} else
6021541Srgrimes			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
6031541Srgrimes			    (unsigned)n->m_len);
6041541Srgrimes		if (len != M_COPYALL)
6051541Srgrimes			len -= n->m_len;
6061541Srgrimes		off = 0;
6071541Srgrimes		m = m->m_next;
6081541Srgrimes		np = &n->m_next;
6091541Srgrimes	}
6101541Srgrimes	if (top == 0)
6111541Srgrimes		MCFail++;
6121541Srgrimes	return (top);
6131541Srgrimesnospace:
6141541Srgrimes	m_freem(top);
6151541Srgrimes	MCFail++;
6161541Srgrimes	return (0);
6171541Srgrimes}
6181541Srgrimes
6191541Srgrimes/*
62015689Swollman * Copy an entire packet, including header (which must be present).
62115689Swollman * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
62254002Sarchie * Note that the copy is read-only, because clusters are not copied,
62354002Sarchie * only their reference counts are incremented.
62415689Swollman */
/*
 * m_copypacket(): optimized equivalent of m_copym(m, 0, M_COPYALL,
 * how) for a chain that starts with a packet header.  Cluster data is
 * shared via reference counts (or ext_ref), so the copy is read-only.
 * Returns the new chain, or 0 on allocation failure.
 */
62515689Swollmanstruct mbuf *
62615689Swollmanm_copypacket(m, how)
62715689Swollman	struct mbuf *m;
62815689Swollman	int how;
62915689Swollman{
63015689Swollman	struct mbuf *top, *n, *o;
63115689Swollman
	/* First mbuf: carries the packet header. */
63215689Swollman	MGET(n, how, m->m_type);
63315689Swollman	top = n;
63415689Swollman	if (!n)
63515689Swollman		goto nospace;
63615689Swollman
63715689Swollman	M_COPY_PKTHDR(n, m);
63815689Swollman	n->m_len = m->m_len;
63915689Swollman	if (m->m_flags & M_EXT) {
		/* Share the cluster by reference instead of copying it. */
64015689Swollman		n->m_data = m->m_data;
64137350Sphk		if(!m->m_ext.ext_ref)
64237350Sphk			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
64337350Sphk		else
64437350Sphk			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
64537350Sphk						m->m_ext.ext_size);
64615689Swollman		n->m_ext = m->m_ext;
64715689Swollman		n->m_flags |= M_EXT;
64815689Swollman	} else {
64915689Swollman		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
65015689Swollman	}
65115689Swollman
	/* Remaining mbufs: same treatment, minus the packet header. */
65215689Swollman	m = m->m_next;
65315689Swollman	while (m) {
65415689Swollman		MGET(o, how, m->m_type);
65515689Swollman		if (!o)
65615689Swollman			goto nospace;
65715689Swollman
65815689Swollman		n->m_next = o;
65915689Swollman		n = n->m_next;
66015689Swollman
66115689Swollman		n->m_len = m->m_len;
66215689Swollman		if (m->m_flags & M_EXT) {
66315689Swollman			n->m_data = m->m_data;
66437350Sphk			if(!m->m_ext.ext_ref)
66537350Sphk				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
66637350Sphk			else
66737350Sphk				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
66837350Sphk							m->m_ext.ext_size);
66915689Swollman			n->m_ext = m->m_ext;
67015689Swollman			n->m_flags |= M_EXT;
67115689Swollman		} else {
67215689Swollman			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
67315689Swollman		}
67415689Swollman
67515689Swollman		m = m->m_next;
67615689Swollman	}
67715689Swollman	return top;
67815689Swollmannospace:
67915689Swollman	m_freem(top);
68015689Swollman	MCFail++;
68115689Swollman	return 0;
68215689Swollman}
68315689Swollman
68415689Swollman/*
6851541Srgrimes * Copy data from an mbuf chain starting "off" bytes from the beginning,
6861541Srgrimes * continuing for "len" bytes, into the indicated buffer.
6871541Srgrimes */
6881549Srgrimesvoid
6891541Srgrimesm_copydata(m, off, len, cp)
6901541Srgrimes	register struct mbuf *m;
6911541Srgrimes	register int off;
6921541Srgrimes	register int len;
6931541Srgrimes	caddr_t cp;
6941541Srgrimes{
6951541Srgrimes	register unsigned count;
6961541Srgrimes
69752201Salfred	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
69852201Salfred	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
6991541Srgrimes	while (off > 0) {
70052201Salfred		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
7011541Srgrimes		if (off < m->m_len)
7021541Srgrimes			break;
7031541Srgrimes		off -= m->m_len;
7041541Srgrimes		m = m->m_next;
7051541Srgrimes	}
7061541Srgrimes	while (len > 0) {
70752201Salfred		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
7081541Srgrimes		count = min(m->m_len - off, len);
7091541Srgrimes		bcopy(mtod(m, caddr_t) + off, cp, count);
7101541Srgrimes		len -= count;
7111541Srgrimes		cp += count;
7121541Srgrimes		off = 0;
7131541Srgrimes		m = m->m_next;
7141541Srgrimes	}
7151541Srgrimes}
7161541Srgrimes
7171541Srgrimes/*
71854002Sarchie * Copy a packet header mbuf chain into a completely new chain, including
71954002Sarchie * copying any mbuf clusters.  Use this instead of m_copypacket() when
72054002Sarchie * you need a writable copy of an mbuf chain.
72154002Sarchie */
/*
 * m_dup(): deep-copy a packet-header mbuf chain, duplicating cluster
 * data rather than sharing it, so the result is fully writable (use
 * instead of m_copypacket() when a writable copy is needed).  Data is
 * repacked densely into MHLEN/MLEN/MCLBYTES-sized pieces.  Returns
 * the new chain, or 0 on failure or if `m' is NULL.
 */
72254002Sarchiestruct mbuf *
72354002Sarchiem_dup(m, how)
72454002Sarchie	struct mbuf *m;
72554002Sarchie	int how;
72654002Sarchie{
72754002Sarchie	struct mbuf **p, *top = NULL;
72854002Sarchie	int remain, moff, nsize;
72954002Sarchie
73054002Sarchie	/* Sanity check */
73154002Sarchie	if (m == NULL)
73254002Sarchie		return (0);
73354002Sarchie	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));
73454002Sarchie
73554002Sarchie	/* While there's more data, get a new mbuf, tack it on, and fill it */
73654002Sarchie	remain = m->m_pkthdr.len;
73754002Sarchie	moff = 0;
73854002Sarchie	p = &top;
73954002Sarchie	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
74054002Sarchie		struct mbuf *n;
74154002Sarchie
74254002Sarchie		/* Get the next new mbuf */
74354002Sarchie		MGET(n, how, m->m_type);
74454002Sarchie		if (n == NULL)
74554002Sarchie			goto nospace;
74654002Sarchie		if (top == NULL) {		/* first one, must be PKTHDR */
74754002Sarchie			M_COPY_PKTHDR(n, m);
74854002Sarchie			nsize = MHLEN;
74954002Sarchie		} else				/* not the first one */
75054002Sarchie			nsize = MLEN;
		/* Attach a cluster when enough data remains to justify one. */
75154002Sarchie		if (remain >= MINCLSIZE) {
75254002Sarchie			MCLGET(n, how);
75354002Sarchie			if ((n->m_flags & M_EXT) == 0) {
75454002Sarchie				(void)m_free(n);
75554002Sarchie				goto nospace;
75654002Sarchie			}
75754002Sarchie			nsize = MCLBYTES;
75854002Sarchie		}
75954002Sarchie		n->m_len = 0;
76054002Sarchie
76154002Sarchie		/* Link it into the new chain */
76254002Sarchie		*p = n;
76354002Sarchie		p = &n->m_next;
76454002Sarchie
76554002Sarchie		/* Copy data from original mbuf(s) into new mbuf */
76654002Sarchie		while (n->m_len < nsize && m != NULL) {
76754002Sarchie			int chunk = min(nsize - n->m_len, m->m_len - moff);
76854002Sarchie
76954002Sarchie			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
77054002Sarchie			moff += chunk;
77154002Sarchie			n->m_len += chunk;
77254002Sarchie			remain -= chunk;
77354002Sarchie			if (moff == m->m_len) {
77454002Sarchie				m = m->m_next;
77554002Sarchie				moff = 0;
77654002Sarchie			}
77754002Sarchie		}
77854002Sarchie
77954002Sarchie		/* Check correct total mbuf length */
78054002Sarchie		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
78154002Sarchie		    	("%s: bogus m_pkthdr.len", __FUNCTION__));
78254002Sarchie	}
78354002Sarchie	return (top);
78454002Sarchie
78554002Sarchienospace:
78654002Sarchie	m_freem(top);
78754002Sarchie	MCFail++;
78854002Sarchie	return (0);
78954002Sarchie}
79054002Sarchie
79154002Sarchie/*
7921541Srgrimes * Concatenate mbuf chain n to m.
7931541Srgrimes * Both chains must be of the same type (e.g. MT_DATA).
7941541Srgrimes * Any m_pkthdr is not updated.
7951541Srgrimes */
7961549Srgrimesvoid
7971541Srgrimesm_cat(m, n)
7981541Srgrimes	register struct mbuf *m, *n;
7991541Srgrimes{
8001541Srgrimes	while (m->m_next)
8011541Srgrimes		m = m->m_next;
8021541Srgrimes	while (n) {
8031541Srgrimes		if (m->m_flags & M_EXT ||
8041541Srgrimes		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
8051541Srgrimes			/* just join the two chains */
8061541Srgrimes			m->m_next = n;
8071541Srgrimes			return;
8081541Srgrimes		}
8091541Srgrimes		/* splat the data from one into the other */
8101541Srgrimes		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
8111541Srgrimes		    (u_int)n->m_len);
8121541Srgrimes		m->m_len += n->m_len;
8131541Srgrimes		n = m_free(n);
8141541Srgrimes	}
8151541Srgrimes}
8161541Srgrimes
/*
 * m_adj(): trim `req_len' bytes from an mbuf chain -- from the head
 * when req_len > 0, from the tail when req_len < 0.  Adjusts m_len
 * (and m_data for head trims) and updates m_pkthdr.len when the first
 * mbuf carries a packet header; no mbufs are freed.
 */
8171549Srgrimesvoid
8181541Srgrimesm_adj(mp, req_len)
8191541Srgrimes	struct mbuf *mp;
8201541Srgrimes	int req_len;
8211541Srgrimes{
8221541Srgrimes	register int len = req_len;
8231541Srgrimes	register struct mbuf *m;
82433678Sbde	register int count;
8251541Srgrimes
8261541Srgrimes	if ((m = mp) == NULL)
8271541Srgrimes		return;
8281541Srgrimes	if (len >= 0) {
8291541Srgrimes		/*
8301541Srgrimes		 * Trim from head.
8311541Srgrimes		 */
8321541Srgrimes		while (m != NULL && len > 0) {
8331541Srgrimes			if (m->m_len <= len) {
8341541Srgrimes				len -= m->m_len;
8351541Srgrimes				m->m_len = 0;
8361541Srgrimes				m = m->m_next;
8371541Srgrimes			} else {
8381541Srgrimes				m->m_len -= len;
8391541Srgrimes				m->m_data += len;
8401541Srgrimes				len = 0;
8411541Srgrimes			}
8421541Srgrimes		}
8431541Srgrimes		m = mp;
8441541Srgrimes		if (mp->m_flags & M_PKTHDR)
8451541Srgrimes			m->m_pkthdr.len -= (req_len - len);
8461541Srgrimes	} else {
8471541Srgrimes		/*
8481541Srgrimes		 * Trim from tail.  Scan the mbuf chain,
8491541Srgrimes		 * calculating its length and finding the last mbuf.
8501541Srgrimes		 * If the adjustment only affects this mbuf, then just
8511541Srgrimes		 * adjust and return.  Otherwise, rescan and truncate
8521541Srgrimes		 * after the remaining size.
8531541Srgrimes		 */
8541541Srgrimes		len = -len;
8551541Srgrimes		count = 0;
8561541Srgrimes		for (;;) {
8571541Srgrimes			count += m->m_len;
8581541Srgrimes			if (m->m_next == (struct mbuf *)0)
8591541Srgrimes				break;
8601541Srgrimes			m = m->m_next;
8611541Srgrimes		}
		/* Common case: the whole trim fits inside the last mbuf. */
8621541Srgrimes		if (m->m_len >= len) {
8631541Srgrimes			m->m_len -= len;
8641541Srgrimes			if (mp->m_flags & M_PKTHDR)
8651541Srgrimes				mp->m_pkthdr.len -= len;
8661541Srgrimes			return;
8671541Srgrimes		}
8681541Srgrimes		count -= len;
8691541Srgrimes		if (count < 0)
8701541Srgrimes			count = 0;
8711541Srgrimes		/*
8721541Srgrimes		 * Correct length for chain is "count".
8731541Srgrimes		 * Find the mbuf with last data, adjust its length,
8741541Srgrimes		 * and toss data from remaining mbufs on chain.
8751541Srgrimes		 */
8761541Srgrimes		m = mp;
8771541Srgrimes		if (m->m_flags & M_PKTHDR)
8781541Srgrimes			m->m_pkthdr.len = count;
8791541Srgrimes		for (; m; m = m->m_next) {
8801541Srgrimes			if (m->m_len >= count) {
8811541Srgrimes				m->m_len = count;
8821541Srgrimes				break;
8831541Srgrimes			}
8841541Srgrimes			count -= m->m_len;
8851541Srgrimes		}
		/* Zero out the lengths of everything past the cut point. */
8863308Sphk		while (m->m_next)
8873308Sphk			(m = m->m_next) ->m_len = 0;
8881541Srgrimes	}
8891541Srgrimes}
8901541Srgrimes
8911541Srgrimes/*
 * Rearrange an mbuf chain so that len bytes are contiguous
8931541Srgrimes * and in the data area of an mbuf (so that mtod and dtom
8941541Srgrimes * will work for a structure of size len).  Returns the resulting
8951541Srgrimes * mbuf chain on success, frees it and returns null on failure.
8961541Srgrimes * If there is room, it will add up to max_protohdr-len extra bytes to the
8971541Srgrimes * contiguous region in an attempt to avoid being called next time.
8981541Srgrimes */
89923081Swollman#define MPFail (mbstat.m_mpfail)
9001541Srgrimes
/*
 * m_pullup - make the first "len" bytes of chain "n" contiguous in a
 * single (non-cluster) mbuf.  Returns the adjusted chain, or NULL after
 * freeing the chain and bumping MPFail on failure.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous, nothing to do */
		m = n;
		n = n->m_next;
		len -= m->m_len;	/* bytes still to be pulled into m */
	} else {
		/*
		 * Need a fresh mbuf in front; a plain mbuf must be able
		 * to hold the whole contiguous region.
		 */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header to the new chain head. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* Room left at the end of m's internal data area. */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy at least len bytes, padding up toward max_protohdr
		 * when space allows, but never more than n currently holds.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* n drained: free it, advance chain */
	} while (len > 0 && n);
	if (len > 0) {
		/* Ran out of chain before gathering len bytes. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
9591541Srgrimes
9601541Srgrimes/*
9611541Srgrimes * Partition an mbuf chain in two pieces, returning the tail --
9621541Srgrimes * all but the first len0 bytes.  In case of failure, it returns NULL and
9631541Srgrimes * attempts to restore the chain to its original state.
9641541Srgrimes */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Walk to the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);	/* chain shorter than len0 */
	remain = m->m_len - len;	/* bytes of m that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes a packet of its own: give it a header. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			/*
			 * The leftover doesn't fit in the new header mbuf:
			 * recurse to split the data portion, and hang the
			 * result off the (empty) header mbuf n.
			 */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/*
		 * Share the external buffer instead of copying: bump the
		 * cluster refcount directly, or use the owner's ref
		 * callback when one is registered.
		 */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if(!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
10281541Srgrimes/*
10291541Srgrimes * Routine to copy from device local memory into mbufs.
10301541Srgrimes */
/*
 * m_devget - copy "totlen" bytes of device memory at "buf" into a fresh
 * mbuf chain with a packet header (rcvif = ifp).  "copy" is an optional
 * device-specific copy routine; bcopy is used when it is NULL.  Returns
 * the chain, or NULL if an mbuf allocation fails (partial chain freed).
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * Nonzero offset: skip the leading region plus two u_shorts.
		 * NOTE(review): the extra 2 * sizeof(u_short) looks like
		 * trailer-protocol framing — confirm against callers.
		 */
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs are plain (no pkthdr). */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Big enough to be worth a cluster; fall back to
			 * the plain mbuf if the cluster pool is empty. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		/* Wrap to the start of the buffer (trailer layout). */
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
10973352Sphk
10983352Sphk/*
10993352Sphk * Copy data from a buffer back into the indicated mbuf chain,
11003352Sphk * starting "off" bytes from the beginning, extending the mbuf
11013352Sphk * chain if necessary.
11023352Sphk */
/*
 * m_copyback - copy "len" bytes from "cp" into the chain at offset "off",
 * growing the chain with zero-filled / plain mbufs as needed.
 *
 * NOTE(review): if an mbuf allocation fails mid-way, the copy stops
 * silently — the pkthdr length is only raised to what was actually
 * reached, and the caller has no way to detect the short copy.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	/* Seek to the mbuf containing offset "off", extending the chain
	 * with zeroed mbufs if the offset lies beyond the current end. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Overwrite (and append, if necessary) len bytes from cp. */
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;	/* bytes of this mbuf now accounted for */
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
115052756Sphk
115152756Sphkvoid
115252756Sphkm_print(const struct mbuf *m)
115352756Sphk{
115452756Sphk	int len;
115554906Seivind	const struct mbuf *m2;
115652756Sphk
115752756Sphk	len = m->m_pkthdr.len;
115852756Sphk	m2 = m;
115952756Sphk	while (len) {
116052756Sphk		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
116152756Sphk		len -= m2->m_len;
116252756Sphk		m2 = m2->m_next;
116352756Sphk	}
116452756Sphk	return;
116552756Sphk}
1166