/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 *	$Id: uipc_mbuf.c,v 1.32 1997/10/28 15:58:22 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map; XXX this
	 * is dumb).  The caller is not necessarily out of luck, however,
	 * as m_reclaim() may still be able to free a substantial amount
	 * of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}
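
/*
 * Illustrative sketch (not part of the original source): callers do not
 * normally invoke m_mballoc() directly; the MGET()/MGETHDR() macros in
 * <sys/mbuf.h> refill the free list when it runs empty and fall back to
 * m_retry() on failure.  A subsystem that wanted to prime the free list
 * ahead of a burst might do, from process context:
 *
 *	int s = splimp();
 *	(void) m_mballoc(32, M_DONTWAIT);	(best effort; returns 0 on failure)
 *	splx(s);
 */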

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

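/*
 * When clusters are larger than a page they must be physically
 * contiguous, and contigmalloc1() cannot be used at interrupt time.
 * Non-blocking m_clalloc() calls therefore just bump i_want_my_mcl and
 * wake this kernel process, which performs the allocations from
 * process context.
 */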
static void
kproc_mclalloc(void)
{
	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
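
/*
 * Illustrative sketch (not part of the original source): m_clalloc()
 * is normally reached through the MCLGET() macro in <sys/mbuf.h>,
 * which takes a cluster off mclfree and refills the list when it runs
 * dry.  The canonical caller pattern is:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		MCLGET(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);		(no cluster was available)
 *			m = NULL;
 *		}
 *	}
 */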

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
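	/*
	 * The temporary redefinition below keeps the MGET() expansion
	 * from recursing back into m_retry() if the free list is still
	 * empty: inside the macro, a failed allocation now yields a nil
	 * mbuf instead of another retry.
	 */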
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
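	/* Same recursion-prevention trick as in m_retry() above. */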
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}
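
/*
 * Illustrative sketch (not part of the original source): a typical
 * allocate/use/release cycle using the function forms above.  M_WAIT
 * is only safe from process context; interrupt-level callers must use
 * M_DONTWAIT and cope with a nil return.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 *	...fill in data and m_pkthdr fields...
 *	m_freem(m);
 */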

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
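
/*
 * Illustrative sketch (not part of the original source): fast-path
 * callers use the M_PREPEND() macro from <sys/mbuf.h>, which grows
 * into the space before m_data when there is room and falls back to
 * m_prepend() (and hence a fresh mbuf) only when there is not:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */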

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
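
/*
 * Illustrative note (not in the original source): because cluster-backed
 * data is shared by bumping a reference count rather than copied,
 * m_copym() is cheap even for large payloads; TCP builds retransmissions
 * from the socket buffer this way, via the m_copy() macro:
 *
 *	n = m_copym(m, off, (int)len, M_DONTWAIT);
 */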

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
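
/*
 * Illustrative sketch (not part of the original source): pulling a
 * protocol header out of a chain into a local structure, regardless of
 * how the bytes happen to be split across mbufs (iphlen is a
 * hypothetical offset):
 *
 *	struct udphdr uh;
 *
 *	m_copydata(m, iphlen, sizeof(uh), (caddr_t)&uh);
 */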

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

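/*
 * Trim req_len bytes of data from the chain: from the head if req_len
 * is positive, from the tail if it is negative.
 */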
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
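
/*
 * Illustrative sketch (not part of the original source): the canonical
 * caller, in the style of ip_input(), makes a protocol header
 * contiguous before casting m_data to a structure pointer:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0) {
 *		ipstat.ips_toosmall++;	(hypothetical counter)
 *		return;
 *	}
 *	ip = mtod(m, struct ip *);
 */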

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
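
/*
 * Illustrative sketch (not part of the original source): peeling a
 * fixed-size record off the front of a chain.  On failure the original
 * chain is left restored, per the comment above (reclen hypothetical):
 *
 *	n = m_split(m0, reclen, M_DONTWAIT);	(n = tail, m0 = head)
 *	if (n == NULL)
 *		...defer and retry later...
 */
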
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
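
/*
 * Illustrative sketch (not part of the original source): how an
 * Ethernet driver might use m_devget() in its receive interrupt to
 * copy a frame out of adapter memory (all names hypothetical):
 *
 *	m = m_devget(sc->rx_ring + roff, pktlen, 0, &sc->arpcom.ac_if, 0);
 *	if (m == 0) {
 *		sc->arpcom.ac_if.if_ierrors++;
 *		return;
 *	}
 *	ether_input(&sc->arpcom.ac_if, eh, m);
 */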

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
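
/*
 * Illustrative sketch (not part of the original source): writing a
 * value into a chain in place; m_copyback() extends the chain with
 * fresh mbufs if "off" lies beyond the current end (hdroff is a
 * hypothetical offset):
 *
 *	u_short sum = 0;
 *
 *	m_copyback(m, hdroff, sizeof(sum), (caddr_t)&sum);
 */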