uipc_mbuf.c revision 27845
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 *	$Id: uipc_mbuf.c,v 1.29 1997/02/24 20:30:55 wollman Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, 0) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map, which is
	 * dumb XXX).  However, all is not lost: m_reclaim may still be
	 * able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && !nowait) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, 0) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (nowait) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
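	/*
	 * Temporarily define m_retry away so that the MGET expansion
	 * below cannot recurse back into this function if the
	 * allocation fails a second time.
	 */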
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
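
/*
 * Illustrative use (a sketch, not part of this file): the M_PREPEND
 * macro falls back to this routine when the first mbuf has no leading
 * space, e.g. when pushing a protocol header onto a chain:
 *
 *	m = m_prepend(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the old chain is already freed)
 */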

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT
 * from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
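
/*
 * Illustrative use (a sketch, not part of this file): duplicate an
 * entire packet for retransmission; cluster data is shared by
 * reference rather than copied:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);	(original chain m is left intact)
 */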

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
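
/*
 * Illustrative use (a sketch, not part of this file): pull a protocol
 * header that may span mbufs into a local structure; iphlen stands in
 * for a hypothetical offset to the transport header:
 *
 *	struct udphdr uh;
 *
 *	m_copydata(m, iphlen, sizeof(uh), (caddr_t)&uh);
 */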

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

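/*
 * Trim req_len bytes of data from the head of an mbuf chain if req_len
 * is positive, or -req_len bytes from the tail if it is negative,
 * adjusting any packet header length to match.
 */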
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
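
/*
 * Illustrative use (a sketch, not part of this file): strip a
 * link-level header from the front and a trailer from the back of a
 * received frame; the lengths here are illustrative:
 *
 *	m_adj(m, sizeof(struct ether_header));	(trim from head)
 *	m_adj(m, -4);				(trim a 4-byte CRC from tail)
 */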

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
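
/*
 * Illustrative use (a sketch, not part of this file): the canonical
 * pattern in protocol input routines, making the header contiguous
 * before casting it with mtod:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;		(the chain has already been freed)
 *	ip = mtod(m, struct ip *);
 */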

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
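
/*
 * Illustrative use (a sketch, not part of this file): IP-style
 * fragmentation, splitting everything past the first mtu bytes into a
 * second packet:
 *
 *	n = m_split(m, mtu, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);	(m is left unchanged on failure)
 */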

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
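
/*
 * Illustrative use (a sketch, not part of this file): a hypothetical
 * driver receive path copying a frame out of board memory into a fresh
 * chain, passing a null copy routine to get the default bcopy; the
 * softc field names are invented for the example:
 *
 *	m = m_devget(sc->rx_buf, pktlen, 0, &sc->arpcom.ac_if, 0);
 *	if (m == 0)
 *		return;			(frame dropped)
 */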

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
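
/*
 * Illustrative use (a sketch, not part of this file): clear a UDP
 * checksum field in place, letting the routine extend the chain if the
 * packet is shorter than expected; the offset arithmetic is
 * hypothetical:
 *
 *	u_short sum = 0;
 *
 *	m_copyback(m, iphlen + 6, sizeof(sum), (caddr_t)&sum);
 */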
923