/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 *	$Id: uipc_mbuf.c,v 1.34 1998/02/20 13:37:38 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map, which is
	 * dumb, XXX).  However, all is not lost: m_reclaim() may still
	 * be able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
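	/*
	 * Redefining m_retry() to a constant null pointer for the next
	 * MGET() expansion keeps a second allocation failure here from
	 * recursing back into this function.
	 */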
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("m_retry: out of mbufs");
	}
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("m_retryhdr: out of mbufs");
	}
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}
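
/*
 * Usage sketch (illustrative only, not referenced anywhere): a typical
 * allocate/fill/free cycle built on the routines above.  The length
 * written is hypothetical.
 */
#ifdef notdef
static struct mbuf *
example_build_packet()
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = 0;
	MCLGET(m, M_DONTWAIT);		/* optional: attach a cluster */
	m->m_len = 64;			/* caller fills mtod(m, caddr_t) */
	m->m_pkthdr.len = m->m_len;
	return (m);			/* later released with m_freem() */
}
#endif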

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
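
/*
 * Caller-side sketch (hypothetical header size): M_PREPEND() uses
 * leading space in the first mbuf when it can and falls back to
 * m_prepend() above; on failure the chain has already been freed.
 */
#ifdef notdef
static struct mbuf *
example_add_header(m)
	struct mbuf *m;
{
	M_PREPEND(m, 16, M_DONTWAIT);	/* room for a 16-byte header */
	if (m == 0)
		return (0);
	/* build the header at mtod(m, caddr_t) */
	return (m);
}
#endif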

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
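
/*
 * Sketch (hypothetical caller): copying a whole packet for
 * retransmission.  Cluster data is shared rather than duplicated; only
 * the reference counts above are bumped.
 */
#ifdef notdef
static struct mbuf *
example_copy_for_retransmit(m)
	struct mbuf *m;
{
	/* May return 0 under memory pressure with M_DONTWAIT. */
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif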

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
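
/*
 * Sketch (hypothetical caller): peek at a fixed-size prefix that may
 * span several mbufs by gathering it into a contiguous stack buffer;
 * the chain itself is left untouched.
 */
#ifdef notdef
static void
example_peek_prefix(m)
	struct mbuf *m;
{
	char prefix[20];

	if (m->m_pkthdr.len < (int)sizeof(prefix))
		return;
	m_copydata(m, 0, sizeof(prefix), (caddr_t)prefix);
	/* inspect prefix[] */
}
#endif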

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
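
/*
 * Sketch (hypothetical caller; both chains assumed to carry packet
 * headers): because m_cat() leaves m_pkthdr alone, the caller fixes up
 * the packet length itself.
 */
#ifdef notdef
static void
example_join_packets(m, n)
	struct mbuf *m, *n;
{
	int tail = n->m_pkthdr.len;

	m_cat(m, n);			/* consumes n */
	m->m_pkthdr.len += tail;
}
#endif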
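/*
 * Trim req_len bytes from the mbuf chain: from the head if req_len is
 * positive, from the tail if it is negative.
 */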
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
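
/*
 * Sketch (illustrative sizes): a positive length trims from the front
 * of the chain, a negative one from the back, e.g. stripping a 14-byte
 * link header and a 4-byte trailer.
 */
#ifdef notdef
static void
example_trim(m)
	struct mbuf *m;
{
	m_adj(m, 14);		/* drop the link-level header */
	m_adj(m, -4);		/* drop the trailing checksum */
}
#endif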

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
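
/*
 * Sketch of the canonical caller pattern (the 20-byte length stands in
 * for a protocol header size): guarantee a contiguous prefix before
 * casting mtod() to a header structure.
 */
#ifdef notdef
static struct mbuf *
example_ensure_contiguous(m)
	struct mbuf *m;
{
	if (m->m_len < 20 && (m = m_pullup(m, 20)) == 0)
		return (0);	/* m_pullup() already freed the chain */
	/* mtod(m, caddr_t) now points at 20 contiguous bytes */
	return (m);
}
#endif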

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
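
/*
 * Sketch (hypothetical fragmentation at 1024 bytes): on failure the
 * original chain is restored, so the caller may simply retry or drop.
 */
#ifdef notdef
static struct mbuf *
example_fragment(m)
	struct mbuf *m;
{
	struct mbuf *tail;

	tail = m_split(m, 1024, M_DONTWAIT);
	if (tail == 0)
		return (0);	/* m is intact */
	/* m now holds the first 1024 bytes, tail the rest */
	return (tail);
}
#endif
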
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
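
/*
 * Sketch (hypothetical receive path): copy a frame out of device-local
 * memory into a fresh chain; a null copy function selects plain
 * bcopy() above.
 */
#ifdef notdef
static struct mbuf *
example_driver_input(buf, totlen, ifp)
	char *buf;
	int totlen;
	struct ifnet *ifp;
{
	return (m_devget(buf, totlen, 0, ifp, 0));
}
#endif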

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
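
/*
 * Sketch (illustrative offsets): patch a 2-byte field 16 bytes into a
 * packet; if the offset fell past the end of the chain, m_copyback()
 * would extend it with zeroed mbufs first.
 */
#ifdef notdef
static void
example_patch_field(m)
	struct mbuf *m;
{
	u_short field = 0;

	m_copyback(m, 16, sizeof(field), (caddr_t)&field);
}
#endif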
930