/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $Id: uipc_mbuf.c,v 1.15 1995/12/02 18:58:42 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full)
		return (0);

	npg = ncl * CLSIZE;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
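
/*
 * Illustrative sketch (not compiled here): callers do not normally invoke
 * m_clalloc() directly; they attach a cluster with the MCLGET() macro,
 * which draws from the free list this routine replenishes.
 *
 *	struct mbuf *m;
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MCLGET(m, M_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_freem(m);		no cluster was available
 *		return (ENOBUFS);
 *	}
 */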

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
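	/*
	 * Temporarily define m_retry away so that the MGET expansion
	 * below cannot recurse back into this function if the second
	 * allocation attempt fails.
	 */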
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}
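
/*
 * Illustrative sketch (not compiled here): the usual allocate/use/free
 * cycle for a single-mbuf packet, using the routines above.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_WAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_pkthdr.len = m->m_len = 0;
 *	... fill via mtod(m, caddr_t) ...
 *	m_freem(m);		frees the whole chain; NULL-safe
 */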

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
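
/*
 * Illustrative sketch (not compiled here): callers normally use the
 * M_PREPEND() macro, which takes this path only when the first mbuf has
 * no leading space.  "hdrlen" is hypothetical.
 *
 *	M_PREPEND(m, hdrlen, M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	the old chain was freed on failure
 */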

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
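
/*
 * Illustrative sketch (not compiled here): keeping the original chain
 * while handing a copy to the output path, TCP-retransmit style.  Note
 * that cluster data is shared by reference, not duplicated.
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);	m itself is untouched
 */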

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
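
/*
 * Illustrative sketch (not compiled here): pulling a fixed-size header
 * that may span mbufs into a local buffer, leaving the chain unchanged.
 * "struct ip" stands in for any header type.
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */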

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

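/*
 * Illustrative sketch (not compiled here): merging a reassembled fragment
 * onto the tail of a packet.  m_cat() consumes "frag" and does not update
 * any pkthdr length, so the caller fixes that up by hand; "fraglen" is
 * hypothetical.
 *
 *	m_cat(m, frag);
 *	m->m_pkthdr.len += fraglen;
 */

/*
 * Trim "req_len" bytes from the mbuf chain: from the head if req_len is
 * positive, from the tail if it is negative.
 */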
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
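
/*
 * Illustrative sketch (not compiled here): trimming a parsed link-level
 * header from the front and a trailing checksum from the back.  "hdrlen"
 * and "crclen" are hypothetical.
 *
 *	m_adj(m, hdrlen);	drop hdrlen bytes from the head
 *	m_adj(m, -crclen);	drop crclen bytes from the tail
 */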

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
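
/*
 * Illustrative sketch (not compiled here): the canonical m_pullup()
 * pattern in a protocol input routine; "struct ip" stands in for any
 * header that must be contiguous before being dereferenced.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;			m_pullup already freed the chain
 *	ip = mtod(m, struct ip *);
 */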

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
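
/*
 * Illustrative sketch (not compiled here): splitting everything beyond
 * "mtu" bytes into a second packet, fragmentation-style.  "mtu" is
 * hypothetical.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, mtu, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	m was left largely intact
 */
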
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
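
/*
 * Illustrative sketch (not compiled here): a receive handler turning a
 * frame in board memory into an mbuf chain.  "sc", "sc_rxbuf" and
 * "framelen" are hypothetical driver state; passing a null copy function
 * selects plain bcopy.
 *
 *	struct mbuf *m;
 *
 *	m = m_devget(sc->sc_rxbuf, framelen, 0, &sc->sc_if, NULL);
 *	if (m == NULL) {
 *		sc->sc_if.if_ierrors++;
 *		return;
 *	}
 */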

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
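
/*
 * Illustrative sketch (not compiled here): patching a freshly computed
 * checksum back into a chain.  The offset 10 is where ip_sum sits in a
 * standard IP header; "csum" is hypothetical.
 *
 *	u_short csum;
 *
 *	csum = in_cksum(m, sizeof(struct ip));
 *	m_copyback(m, 10, sizeof(csum), (caddr_t)&csum);
 */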
745