uipc_mbuf.c revision 23081
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 *	$Id: uipc_mbuf.c,v 1.28 1997/02/18 20:43:05 wollman Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");

static void	m_reclaim __P((void));
/* "number of clusters of pages" */
#define NCL_INIT	1

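/* "number of mbufs" allocated at boot time (see mbinit below) */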
#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, 0) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map) (XXX which
	 * is dumb).  However, all is not lost: m_reclaim may still be
	 * able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && !nowait) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

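/*
 * (Descriptive note: when a cluster spans more than one page it must come
 * from physically contiguous memory, via contigmalloc1(), which cannot be
 * used at interrupt time.  The kernel thread below performs those
 * allocations in process context; the nowait path of m_clalloc() just
 * queues a request and wakes it.)
 */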
#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, 0) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (nowait) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
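	/*
	 * An MGET expands to a call to m_retry() when the free list is
	 * empty; temporarily defining m_retry to a nil constant below
	 * makes the failure branch of this MGET expand to NULL instead
	 * of recursing back into this function.
	 */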
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
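	/* Same trick as in m_retry() above, applied to MGETHDR. */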
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
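/*
 * (For example, illustrative: m = m_get(M_WAIT, MT_DATA) and
 * MGET(m, M_WAIT, MT_DATA) are equivalent; the functions simply wrap
 * the macros for callers off the critical path.)
 */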
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
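/*
 * (Illustrative note: the M_PREPEND() macro in <sys/mbuf.h> first tries
 * to back m_data up within the existing first mbuf, via M_LEADINGSPACE,
 * and falls back to this routine only when there is no room in front of
 * the current data.)
 */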
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
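
/*
 * (Descriptive note: for M_EXT mbufs the copy above shares the external
 * buffer by reference, bumping mclrefcnt or calling ext_ref, rather than
 * copying the bytes; data in the copy must therefore be treated as
 * read-only.)
 */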

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
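
/*
 * (XXX descriptive observation: unlike m_copym(), the M_EXT paths above
 * bump mclrefcnt directly and never consult ext_ref, so this routine
 * assumes the external storage is an ordinary mbuf cluster.)
 */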

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
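/*
 * (For example, illustrative: to pull a header out of a chain into
 * aligned local storage,
 *	struct ip ip;
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 * )
 */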
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

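/*
 * Trim req_len bytes of data from the chain: from the head if req_len
 * is positive, from the tail if it is negative.  The m_pkthdr length,
 * if present, is adjusted to match.
 */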
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
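
/*
 * Typical use (illustrative):
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;
 * On failure m_pullup has already freed the chain.
 */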

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
926