/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 21737 1997-01-15 20:46:02Z dg $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map; XXX this is
	 * dumb).  The caller is not necessarily out of luck, though:
	 * m_reclaim may still be able to free a substantial amount of
	 * space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}
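
/*
 * Illustrative only: like mbinit() above, any caller of m_mballoc()
 * or m_clalloc() must run at splimp:
 *
 *	s = splimp();
 *	(void) m_mballoc(NMB_INIT, M_DONTWAIT);
 *	splx(s);
 */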

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full)
		return (0);

	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	ncl = ncl * PAGE_SIZE / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
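	/*
	 * The temporary redefinition below makes the m_retry() that the
	 * MGET macro expands to on failure evaluate to a null pointer,
	 * so this function cannot recurse into itself.
	 */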
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
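	/*
	 * As in m_retry() above, temporarily redefine m_retryhdr() so
	 * that the MGETHDR expansion cannot recurse.
	 */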
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
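
/*
 * Illustrative only: callers normally invoke the M_PREPEND() macro,
 * which uses the leading space before m->m_data when there is room
 * and falls back to m_prepend() only when there is not.
 */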

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT
 * from caller.
 */
static int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
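
/*
 * Illustrative only: TCP output duplicates data from the socket send
 * queue this way, via the historic m_copy() wrapper from mbuf.h:
 *
 *	m = m_copy(so->so_snd.sb_mb, off, (int)len);
 */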

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
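
/*
 * Illustrative only: copy a header into local storage regardless of
 * how its bytes are scattered across the chain:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */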

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

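/*
 * Illustrative only: IP reassembly, for example, trims each fragment's
 * headers with m_adj() and then appends the fragment's data to the
 * packet being rebuilt with m_cat().
 */

/*
 * Trim req_len bytes from the chain: from the head if req_len is
 * positive, from the tail if it is negative.
 */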
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
static int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
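
/*
 * Illustrative only: a protocol input routine typically guarantees a
 * contiguous header before using mtod(); note that on failure the
 * original chain has already been freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;
 *	ip = mtod(m, struct ip *);
 */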

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
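
/*
 * Illustrative only: m_split(m0, 100, M_DONTWAIT) leaves the first
 * 100 bytes of the packet in m0 and returns a new chain holding the
 * remainder, or NULL if the required allocation fails.
 */
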
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
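
/*
 * Illustrative only: a driver whose receive buffer is ordinary memory
 * can build a chain with
 *
 *	m = m_devget(buf, totlen, 0, ifp, NULL);
 *
 * and pass a copy routine instead of NULL when the buffer needs
 * special access (e.g. word-at-a-time board memory).
 */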

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
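
/*
 * Illustrative only: overwrite four bytes at offset 20 of a packet in
 * place, extending the chain with zero-filled mbufs if it is short:
 *
 *	u_long x = 0;
 *
 *	m_copyback(m0, 20, sizeof(x), (caddr_t)&x);
 */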
855