uipc_mbuf.c revision 52756
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 52756 1999-11-01 15:03:20Z phk $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);	/* XXX fixup? */
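
/*
 * Both tunables are read from the kernel environment at boot, so they
 * can be overridden without recompiling; for example (an illustrative
 * value, not a recommendation), in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="4096"
 */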

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map; XXX that is
	 * dumb).  All is not lost, however: m_reclaim() may still be
	 * able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
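/*
 * Temporarily nullify m_retry so that the MGET expansion below cannot
 * recurse back into this function if the allocation fails again.
 */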
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("Out of mbufs");
	}
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
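/*
 * As in m_retry() above: nullify m_retryhdr so that the MGETHDR
 * expansion below cannot recurse into this function.
 */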
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("Out of mbufs");
	}
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}
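
/*
 * A minimal usage sketch for the routines above (an illustration, not
 * part of the original file): allocate a packet header mbuf, check for
 * failure, and release the whole chain when finished.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = 0;
 *	...
 *	m_freem(m);
 */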

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
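
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally invoke the M_PREPEND() macro, which uses leading space in
 * the first mbuf when available and falls back to the function above
 * otherwise.  "struct myhdr" is a stand-in name:
 *
 *	M_PREPEND(m, sizeof(struct myhdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */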

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
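
/*
 * Usage sketch (illustrative, not part of the original file): take a
 * copy of an entire packet chain, e.g. before handing the original to
 * a consumer that will free it:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *
 * Cluster (M_EXT) data is shared by bumping a reference count rather
 * than copied, so the copy must be treated as read-only.
 */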

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
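
/*
 * Usage sketch (illustrative, not part of the original file): pull a
 * header out of a chain into a local structure; "struct myhdr" is a
 * stand-in name:
 *
 *	struct myhdr hdr;
 *
 *	m_copydata(m, off, sizeof(hdr), (caddr_t)&hdr);
 *
 * The caller must ensure the chain holds at least off + sizeof(hdr)
 * bytes; the KASSERTs above catch violations only in diagnostic
 * kernels.
 */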

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

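/*
 * Trim "req_len" bytes of data from the chain: from the head if
 * req_len is positive, from the tail if it is negative.  The packet
 * header length of a leading pkthdr mbuf is kept consistent.
 */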
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
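
/*
 * Usage sketch (illustrative, not part of the original file): the
 * classical pattern for making a header contiguous before casting,
 * with "struct myhdr" as a stand-in name:
 *
 *	if (m->m_len < sizeof(struct myhdr) &&
 *	    (m = m_pullup(m, sizeof(struct myhdr))) == NULL)
 *		return;
 *	hp = mtod(m, struct myhdr *);
 *
 * On failure the chain has already been freed, so the caller must not
 * touch it again.
 */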

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
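
/*
 * Usage sketch (illustrative, not part of the original file): split a
 * packet after len0 bytes, as a protocol might when fragmenting:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, len0, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *
 * On success m holds the first len0 bytes and tail the remainder, with
 * both packet header lengths adjusted by the code above.
 */
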
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
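
/*
 * Usage sketch (illustrative, not part of the original file): overwrite
 * len bytes at offset off with the contents of a local buffer, growing
 * the chain if needed:
 *
 *	m_copyback(m, off, sizeof(buf), (caddr_t)buf);
 *
 * Note that extension mbufs are allocated with M_DONTWAIT, so under
 * memory pressure the copy may silently be left incomplete.
 */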
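
/*
 * Debugging aid: dump each mbuf of a packet chain to the console.  The
 * chain is assumed to carry a valid packet header length.
 */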
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
