uipc_mbuf.c revision 78592
1/*
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by the University of
16 *	California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
34 * $FreeBSD: head/sys/kern/uipc_mbuf.c 78592 2001-06-22 06:35:32Z bmilekic $
35 */
36
37#include "opt_param.h"
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/lock.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/sysctl.h>
45#include <sys/domain.h>
46#include <sys/protosw.h>
47
/*
 * Worst-case header size bounds.  None of these are assigned anywhere in
 * this file; NOTE(review): presumably they are initialized by the protocol
 * domains at boot time — confirm against domain/protocol init code.
 */
int	max_linkhdr;	/* reserved at the front of m_devget() chains */
int	max_protohdr;	/* extra bytes m_pullup() tries to make contiguous */
int	max_hdr;
int	max_datalen;

/*
 * sysctl(8) exported objects
 *
 * All four are exported read-write under kern.ipc so they can be
 * inspected (and tuned) from userland.
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
64
65/*
66 * struct mbuf *
67 * m_getm(m, len, how, type)
68 *
69 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
70 * best) and return a pointer to the top of the allocated chain. If m is
71 * non-null, then we assume that it is a single mbuf or an mbuf chain to
72 * which we want len bytes worth of mbufs and/or clusters attached, and so
73 * if we succeed in allocating it, we will just return a pointer to m.
74 *
75 * If we happen to fail at any point during the allocation, we will free
76 * up everything we have already allocated and return NULL.
77 *
78 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	/* Allocate the first mbuf; back it with a cluster if a plain
	 * mbuf's data area would not be worth using for this length. */
	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	/* Credit the space this mbuf provides against the request. */
	len -= M_TRAILINGSPACE(mp);

	/* Find the tail of the caller's chain, if one was supplied;
	 * otherwise the new allocation becomes the returned chain. */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	/* Keep allocating mbufs (with clusters when worthwhile) until
	 * the accumulated trailing space covers the requested length. */
	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	/* Graft the newly allocated run onto the caller's chain. */
	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	/* Free only what we allocated; the caller's chain is untouched. */
	m_freem(top);
	return (NULL);
}
130
131void
132m_freem(struct mbuf *m)
133{
134	struct mbuf *n;
135
136	if (m == NULL)
137		return;
138	do {
139		MFREE(m, n);
140		m = n;
141	} while (m);
142}
143
144/*
145 * Lesser-used path for M_PREPEND:
146 * allocate new mbuf to prepend to chain,
147 * copy junk along.
148 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		/* On allocation failure the original chain is freed too;
		 * callers must not reuse m after a NULL return. */
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Migrate the packet header into the new first mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		/* Place the new data at the end of the mbuf's data area. */
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
170
171/*
172 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
173 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
174 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
175 * Note that the copy is read-only, because clusters are not copied,
176 * only their reference counts are incremented.
177 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;		/* remaining offset into the source chain */
	struct mbuf *top;	/* head of the chain being built */
	int copyhdr = 0;	/* copy the packet header into first mbuf? */

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	/* Only a copy starting at the front can carry the packet header. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip whole mbufs until "off" falls inside the current one. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			/* Running off the end is only legal for M_COPYALL. */
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Share the cluster instead of copying it: point at
			 * the same data and bump the reference count.  This
			 * is why the resulting chain is read-only.
			 */
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* only the first mbuf has a nonzero offset */
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
241
242/*
243 * Copy an entire packet, including header (which must be present).
244 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
245 * Note that the copy is read-only, because clusters are not copied,
246 * only their reference counts are incremented.
247 * Preserve alignment of the first mbuf so if the creator has left
248 * some room at the beginning (e.g. for inserting protocol headers)
249 * the copies still have the room available.
250 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* Share the cluster (reference-counted); copy is read-only. */
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		/*
		 * Reproduce the original data offset within the mbuf so any
		 * leading room (e.g. for protocol headers) is preserved.
		 */
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	/* Copy the rest of the chain the same way (no offset fixup needed:
	 * only the first mbuf carries the preserved alignment). */
	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
300
301/*
302 * Copy data from an mbuf chain starting "off" bytes from the beginning,
303 * continuing for "len" bytes, into the indicated buffer.
304 */
305void
306m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
307{
308	unsigned count;
309
310	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
311	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
312	while (off > 0) {
313		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
314		if (off < m->m_len)
315			break;
316		off -= m->m_len;
317		m = m->m_next;
318	}
319	while (len > 0) {
320		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
321		count = min(m->m_len - off, len);
322		bcopy(mtod(m, caddr_t) + off, cp, count);
323		len -= count;
324		cp += count;
325		off = 0;
326		m = m->m_next;
327	}
328}
329
330/*
331 * Copy a packet header mbuf chain into a completely new chain, including
332 * copying any mbuf clusters.  Use this instead of m_copypacket() when
333 * you need a writable copy of an mbuf chain.
334 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/*
	 * While there's more data, get a new mbuf, tack it on, and fill it.
	 * Unlike m_copypacket(), cluster data is physically copied, so the
	 * resulting chain is writable.
	 */
	remain = m->m_pkthdr.len;
	moff = 0;		/* offset into the current source mbuf */
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf.  A single
		 * new mbuf may absorb several small source mbufs. */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    	("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
401
402/*
403 * Concatenate mbuf chain n to m.
404 * Both chains must be of the same type (e.g. MT_DATA).
405 * Any m_pkthdr is not updated.
406 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	/* Find the last mbuf of chain m. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/*
		 * Compact n's data into m's tail only when m is a plain
		 * mbuf (cluster data areas are never written here) and the
		 * bytes fit within m_dat; otherwise just link the chains.
		 */
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);	/* m_free returns the successor mbuf */
	}
}
426
/*
 * Trim req_len bytes of data from an mbuf chain: from the head when
 * req_len is positive, from the tail when it is negative.  If the
 * chain carries a packet header, its length is updated to match.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Empty this mbuf entirely and continue. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* Partial trim: advance the data pointer. */
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* Fast path: the last mbuf absorbs the whole trim. */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any mbufs past the truncation point
		 * (they stay linked but carry no data). */
		while (m->m_next)
			(m = m->m_next) ->m_len = 0;
	}
}
498
499/*
500 * Rearange an mbuf chain so that len bytes are contiguous
501 * and in the data area of an mbuf (so that mtod and dtom
502 * will work for a structure of size len).  Returns the resulting
503 * mbuf chain on success, frees it and returns null on failure.
504 * If there is room, it will add up to max_protohdr-len extra bytes to the
505 * contiguous region in an attempt to avoid being called next time.
506 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;	/* mbuf that will hold the contiguous data */
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;	/* bytes still needed from the rest */
	} else {
		/* Can't pull more than a header mbuf can hold. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header to the new first mbuf. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy as much as needed (opportunistically up to
		 * max_protohdr extra, to avoid being called again),
		 * bounded by the space in m and the data in n.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* drained: free and advance */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain was shorter than len: fail and free everything. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
563
564/*
565 * Partition an mbuf chain in two pieces, returning the tail --
566 * all but the first len0 bytes.  In case of failure, it returns NULL and
567 * attempts to restore the chain to its original state.
568 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf where the split point falls. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);	/* chain shorter than len0 */
	remain = m->m_len - len;	/* bytes of m that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		/* Build a packet header for the new tail chain. */
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet; recurse to split m
			 * itself and hang the result off the new header. */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster between the two halves. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
626/*
627 * Routine to copy from device local memory into mbufs.
628 * Note that `off' argument is offset into first mbuf of target chain from
629 * which to begin copying the data to.
630 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
	 void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	/* "off" must fit within the first (header) mbuf. */
	if (off < 0 || off > MHLEN)
		return (NULL);

	/* First mbuf carries the packet header. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs are plain data mbufs. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			/* Large remainder: try for a cluster; on failure
			 * fall back to the plain mbuf's data area. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		if (off) {
			/* Leave "off" bytes of leading space (first mbuf only). */
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		/* Use the caller's copy routine if given, else bcopy. */
		if (copy)
			copy(buf, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(buf, mtod(m, caddr_t), (unsigned)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
688
689/*
690 * Copy data from a buffer back into the indicated mbuf chain,
691 * starting "off" bytes from the beginning, extending the mbuf
692 * chain if necessary.
693 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;		/* running total of bytes covered */

	if (m0 == NULL)
		return;
	/* Walk to the mbuf containing "off", extending the chain with
	 * zero-filled mbufs if the offset lies past its end. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			/* Zeroed so any gap before "off" reads as zeros. */
			n = m_get_clrd(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Copy data in, extending with plain mbufs as needed.  A mid-copy
	 * allocation failure silently truncates the operation. */
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Grow the packet header length if the write extended the chain. */
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
737
738void
739m_print(const struct mbuf *m)
740{
741	int len;
742	const struct mbuf *m2;
743
744	len = m->m_pkthdr.len;
745	m2 = m;
746	while (len) {
747		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
748		len -= m2->m_len;
749		m2 = m2->m_next;
750	}
751	return;
752}
753