uipc_mbuf.c: diff of revision 139804 (deleted) against revision 141616 (added)
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 139804 2005-01-06 23:35:40Z imp $");
33__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 141616 2005-02-10 12:02:37Z phk $");
34
35#include "opt_mac.h"
36#include "opt_param.h"
37#include "opt_mbuf_stress_test.h"
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/limits.h>
43#include <sys/lock.h>
44#include <sys/mac.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/sysctl.h>
48#include <sys/domain.h>
49#include <sys/protosw.h>
50#include <sys/uio.h>
51
52int max_linkhdr;
53int max_protohdr;
54int max_hdr;
55int max_datalen;
56#ifdef MBUF_STRESS_TEST
57int m_defragpackets;
58int m_defragbytes;
59int m_defraguseless;
60int m_defragfailure;
61int m_defragrandomfailures;
62#endif
63
64/*
65 * sysctl(8) exported objects
66 */
67SYSCTL_DECL(_kern_ipc);
68SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
69 &max_linkhdr, 0, "");
70SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
71 &max_protohdr, 0, "");
72SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
73SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
74 &max_datalen, 0, "");
75#ifdef MBUF_STRESS_TEST
76SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
77 &m_defragpackets, 0, "");
78SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
79 &m_defragbytes, 0, "");
80SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
81 &m_defraguseless, 0, "");
82SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
83 &m_defragfailure, 0, "");
84SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
85 &m_defragrandomfailures, 0, "");
86#endif
87
88/*
89 * Malloc-type for external ext_buf ref counts.
90 */
91MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");
91static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");
92
93/*
94 * Allocate a given length worth of mbufs and/or clusters (whatever fits
95 * best) and return a pointer to the top of the allocated chain. If an
96 * existing mbuf chain is provided, then we will append the new chain
97 * to the existing one but still return the top of the newly allocated
98 * chain.
99 */
100struct mbuf *
101m_getm(struct mbuf *m, int len, int how, short type)
102{
103 struct mbuf *mb, *top, *cur, *mtail;
104 int num, rem;
105 int i;
106
107 KASSERT(len >= 0, ("m_getm(): len is < 0"));
108
109 /* If m != NULL, we will append to the end of that chain. */
110 if (m != NULL)
111 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
112 else
113 mtail = NULL;
114
115 /*
116 * Calculate how many mbufs+clusters ("packets") we need and how much
117 * leftover there is after that and allocate the first mbuf+cluster
118 * if required.
119 */
120 num = len / MCLBYTES;
121 rem = len % MCLBYTES;
122 top = cur = NULL;
123 if (num > 0) {
124 if ((top = cur = m_getcl(how, type, 0)) == NULL)
125 goto failed;
126 top->m_len = 0;
127 }
128 num--;
129
130 for (i = 0; i < num; i++) {
131 mb = m_getcl(how, type, 0);
132 if (mb == NULL)
133 goto failed;
134 mb->m_len = 0;
135 cur = (cur->m_next = mb);
136 }
137 if (rem > 0) {
138 mb = (rem > MINCLSIZE) ?
139 m_getcl(how, type, 0) : m_get(how, type);
140 if (mb == NULL)
141 goto failed;
142 mb->m_len = 0;
143 if (cur == NULL)
144 top = mb;
145 else
146 cur->m_next = mb;
147 }
148
149 if (mtail != NULL)
150 mtail->m_next = top;
151 return top;
152failed:
153 if (top != NULL)
154 m_freem(top);
155 return NULL;
156}
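
/*
 * Example (illustrative sketch): a typical m_getm() call preallocates
 * a chain for a known number of bytes, then fills it in:
 *
 *    struct mbuf *m;
 *
 *    m = m_getm(NULL, 4096, M_DONTWAIT, MT_DATA);
 *    if (m == NULL)
 *            return (ENOBUFS);
 *    ...fill the chain, then m_freem(m) when done...
 */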
157
158/*
159 * Free an entire chain of mbufs and associated external buffers, if
160 * applicable.
161 */
162void
163m_freem(struct mbuf *mb)
164{
165
166 while (mb != NULL)
167 mb = m_free(mb);
168}
169
170/*-
171 * Configure a provided mbuf to refer to the provided external storage
 172 * buffer and set up a reference count for said buffer. If setting
 173 * up the reference count fails, the M_EXT bit will not be set. If
 174 * successful, the M_EXT bit is set in the mbuf's flags.
175 *
176 * Arguments:
177 * mb The existing mbuf to which to attach the provided buffer.
178 * buf The address of the provided external storage buffer.
179 * size The size of the provided buffer.
180 * freef A pointer to a routine that is responsible for freeing the
181 * provided external storage buffer.
182 * args A pointer to an argument structure (of any type) to be passed
183 * to the provided freef routine (may be NULL).
184 * flags Any other flags to be passed to the provided mbuf.
185 * type The type that the external storage buffer should be
186 * labeled with.
187 *
188 * Returns:
189 * Nothing.
190 */
191void
192m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
193 void (*freef)(void *, void *), void *args, int flags, int type)
194{
195 u_int *ref_cnt = NULL;
196
197 /* XXX Shouldn't be adding EXT_CLUSTER with this API */
198 if (type == EXT_CLUSTER)
199 ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
200 mb->m_ext.ext_buf);
201 else if (type == EXT_EXTREF)
202 ref_cnt = mb->m_ext.ref_cnt;
203 mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
204 malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
205 if (mb->m_ext.ref_cnt != NULL) {
206 *(mb->m_ext.ref_cnt) = 1;
207 mb->m_flags |= (M_EXT | flags);
208 mb->m_ext.ext_buf = buf;
209 mb->m_data = mb->m_ext.ext_buf;
210 mb->m_ext.ext_size = size;
211 mb->m_ext.ext_free = freef;
212 mb->m_ext.ext_args = args;
213 mb->m_ext.ext_type = type;
214 }
215}
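
/*
 * Example (illustrative sketch): attaching a driver-owned buffer.  The
 * names my_buf, MY_BUFSIZE and my_free() are hypothetical; EXT_NET_DRV
 * is one plausible ext_type for driver-supplied storage:
 *
 *    MGETHDR(m, M_DONTWAIT, MT_DATA);
 *    if (m != NULL) {
 *            m_extadd(m, (caddr_t)my_buf, MY_BUFSIZE, my_free,
 *                NULL, 0, EXT_NET_DRV);
 *            if ((m->m_flags & M_EXT) == 0)
 *                    ...attach failed; reclaim m and my_buf...
 *    }
 */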
216
217/*
218 * Non-directly-exported function to clean up after mbufs with M_EXT
219 * storage attached to them if the reference count hits 0.
220 */
221void
222mb_free_ext(struct mbuf *m)
223{
224 u_int cnt;
225
226 /*
227 * This is tricky. We need to make sure to decrement the
228 * refcount in a safe way but to also clean up if we're the
 229 * last reference. This method appears to do so without races.
230 */
231 do {
232 cnt = *(m->m_ext.ref_cnt);
233 if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
234 if (cnt == 1) {
235 /*
236 * Do the free, should be safe.
237 */
238 if (m->m_ext.ext_type == EXT_PACKET) {
239 uma_zfree(zone_pack, m);
240 return;
241 } else if (m->m_ext.ext_type == EXT_CLUSTER) {
242 uma_zfree(zone_clust, m->m_ext.ext_buf);
243 m->m_ext.ext_buf = NULL;
244 } else {
245 (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
246 m->m_ext.ext_args);
247 if (m->m_ext.ext_type != EXT_EXTREF)
248 free(m->m_ext.ref_cnt, M_MBUF);
249 m->m_ext.ext_buf = NULL;
250 }
251 }
252 /* Decrement (and potentially free) done, safely. */
253 break;
254 }
255 } while (1);
256 uma_zfree(zone_mbuf, m);
257}
258
259/*
260 * "Move" mbuf pkthdr from "from" to "to".
261 * "from" must have M_PKTHDR set, and "to" must be empty.
262 */
263void
264m_move_pkthdr(struct mbuf *to, struct mbuf *from)
265{
266
267#if 0
268 /* see below for why these are not enabled */
269 M_ASSERTPKTHDR(to);
270 /* Note: with MAC, this may not be a good assertion. */
271 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
272 ("m_move_pkthdr: to has tags"));
273#endif
274 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
275#ifdef MAC
276 /*
277 * XXXMAC: It could be this should also occur for non-MAC?
278 */
279 if (to->m_flags & M_PKTHDR)
280 m_tag_delete_chain(to, NULL);
281#endif
282 to->m_flags = from->m_flags & M_COPYFLAGS;
283 to->m_data = to->m_pktdat;
284 to->m_pkthdr = from->m_pkthdr; /* especially tags */
285 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
286 from->m_flags &= ~M_PKTHDR;
287}
288
289/*
290 * Duplicate "from"'s mbuf pkthdr in "to".
291 * "from" must have M_PKTHDR set, and "to" must be empty.
292 * In particular, this does a deep copy of the packet tags.
293 */
294int
295m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
296{
297
298#if 0
299 /*
300 * The mbuf allocator only initializes the pkthdr
301 * when the mbuf is allocated with MGETHDR. Many users
302 * (e.g. m_copy*, m_prepend) use MGET and then
303 * smash the pkthdr as needed causing these
304 * assertions to trip. For now just disable them.
305 */
306 M_ASSERTPKTHDR(to);
307 /* Note: with MAC, this may not be a good assertion. */
308 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
309#endif
310 MBUF_CHECKSLEEP(how);
311#ifdef MAC
312 if (to->m_flags & M_PKTHDR)
313 m_tag_delete_chain(to, NULL);
314#endif
315 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
316 if ((to->m_flags & M_EXT) == 0)
317 to->m_data = to->m_pktdat;
318 to->m_pkthdr = from->m_pkthdr;
319 SLIST_INIT(&to->m_pkthdr.tags);
320 return (m_tag_copy_chain(to, from, MBTOM(how)));
321}
322
323/*
324 * Lesser-used path for M_PREPEND:
325 * allocate new mbuf to prepend to chain,
326 * copy junk along.
327 */
328struct mbuf *
329m_prepend(struct mbuf *m, int len, int how)
330{
331 struct mbuf *mn;
332
333 if (m->m_flags & M_PKTHDR)
334 MGETHDR(mn, how, m->m_type);
335 else
336 MGET(mn, how, m->m_type);
337 if (mn == NULL) {
338 m_freem(m);
339 return (NULL);
340 }
341 if (m->m_flags & M_PKTHDR)
342 M_MOVE_PKTHDR(mn, m);
343 mn->m_next = m;
344 m = mn;
345 if (len < MHLEN)
346 MH_ALIGN(m, len);
347 m->m_len = len;
348 return (m);
349}
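
/*
 * Example (illustrative sketch): callers normally use the M_PREPEND()
 * macro, which only falls back to m_prepend() when no leading space is
 * available; struct my_hdr is hypothetical:
 *
 *    M_PREPEND(m, sizeof(struct my_hdr), M_DONTWAIT);
 *    if (m == NULL)
 *            return (ENOBUFS);
 *
 * On failure the macro, like m_prepend(), has already freed the chain.
 */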
350
351/*
352 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
353 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
354 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
355 * Note that the copy is read-only, because clusters are not copied,
356 * only their reference counts are incremented.
357 */
358struct mbuf *
359m_copym(struct mbuf *m, int off0, int len, int wait)
360{
361 struct mbuf *n, **np;
362 int off = off0;
363 struct mbuf *top;
364 int copyhdr = 0;
365
366 KASSERT(off >= 0, ("m_copym, negative off %d", off));
367 KASSERT(len >= 0, ("m_copym, negative len %d", len));
368 MBUF_CHECKSLEEP(wait);
369 if (off == 0 && m->m_flags & M_PKTHDR)
370 copyhdr = 1;
371 while (off > 0) {
372 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
373 if (off < m->m_len)
374 break;
375 off -= m->m_len;
376 m = m->m_next;
377 }
378 np = &top;
379 top = 0;
380 while (len > 0) {
381 if (m == NULL) {
382 KASSERT(len == M_COPYALL,
383 ("m_copym, length > size of mbuf chain"));
384 break;
385 }
386 if (copyhdr)
387 MGETHDR(n, wait, m->m_type);
388 else
389 MGET(n, wait, m->m_type);
390 *np = n;
391 if (n == NULL)
392 goto nospace;
393 if (copyhdr) {
394 if (!m_dup_pkthdr(n, m, wait))
395 goto nospace;
396 if (len == M_COPYALL)
397 n->m_pkthdr.len -= off0;
398 else
399 n->m_pkthdr.len = len;
400 copyhdr = 0;
401 }
402 n->m_len = min(len, m->m_len - off);
403 if (m->m_flags & M_EXT) {
404 n->m_data = m->m_data + off;
405 n->m_ext = m->m_ext;
406 n->m_flags |= M_EXT;
407 MEXT_ADD_REF(m);
408 } else
409 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
410 (u_int)n->m_len);
411 if (len != M_COPYALL)
412 len -= n->m_len;
413 off = 0;
414 m = m->m_next;
415 np = &n->m_next;
416 }
417 if (top == NULL)
418 mbstat.m_mcfail++; /* XXX: No consistency. */
419
420 return (top);
421nospace:
422 m_freem(top);
423 mbstat.m_mcfail++; /* XXX: No consistency. */
424 return (NULL);
425}
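
/*
 * Example (illustrative sketch): take a read-only copy of a whole
 * packet, as a transmit path might before handing data down:
 *
 *    n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *    if (n == NULL)
 *            ...drop or back off...
 *
 * Since n shares clusters with m, check M_WRITABLE() before writing.
 */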
426
427/*
428 * Copy an entire packet, including header (which must be present).
429 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
430 * Note that the copy is read-only, because clusters are not copied,
431 * only their reference counts are incremented.
432 * Preserve alignment of the first mbuf so if the creator has left
433 * some room at the beginning (e.g. for inserting protocol headers)
434 * the copies still have the room available.
435 */
436struct mbuf *
437m_copypacket(struct mbuf *m, int how)
438{
439 struct mbuf *top, *n, *o;
440
441 MBUF_CHECKSLEEP(how);
442 MGET(n, how, m->m_type);
443 top = n;
444 if (n == NULL)
445 goto nospace;
446
447 if (!m_dup_pkthdr(n, m, how))
448 goto nospace;
449 n->m_len = m->m_len;
450 if (m->m_flags & M_EXT) {
451 n->m_data = m->m_data;
452 n->m_ext = m->m_ext;
453 n->m_flags |= M_EXT;
454 MEXT_ADD_REF(m);
455 } else {
 456 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
457 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
458 }
459
460 m = m->m_next;
461 while (m) {
462 MGET(o, how, m->m_type);
463 if (o == NULL)
464 goto nospace;
465
466 n->m_next = o;
467 n = n->m_next;
468
469 n->m_len = m->m_len;
470 if (m->m_flags & M_EXT) {
471 n->m_data = m->m_data;
472 n->m_ext = m->m_ext;
473 n->m_flags |= M_EXT;
474 MEXT_ADD_REF(m);
475 } else {
476 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
477 }
478
479 m = m->m_next;
480 }
481 return top;
482nospace:
483 m_freem(top);
484 mbstat.m_mcfail++; /* XXX: No consistency. */
485 return (NULL);
486}
487
488/*
489 * Copy data from an mbuf chain starting "off" bytes from the beginning,
490 * continuing for "len" bytes, into the indicated buffer.
491 */
492void
493m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
494{
495 u_int count;
496
497 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
498 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
499 while (off > 0) {
500 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
501 if (off < m->m_len)
502 break;
503 off -= m->m_len;
504 m = m->m_next;
505 }
506 while (len > 0) {
507 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
508 count = min(m->m_len - off, len);
509 bcopy(mtod(m, caddr_t) + off, cp, count);
510 len -= count;
511 cp += count;
512 off = 0;
513 m = m->m_next;
514 }
515}
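
/*
 * Example (illustrative sketch): pull a protocol header out of a chain
 * into local storage without modifying the chain:
 *
 *    struct ip iphdr;
 *
 *    m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */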
516
517/*
518 * Copy a packet header mbuf chain into a completely new chain, including
519 * copying any mbuf clusters. Use this instead of m_copypacket() when
520 * you need a writable copy of an mbuf chain.
521 */
522struct mbuf *
523m_dup(struct mbuf *m, int how)
524{
525 struct mbuf **p, *top = NULL;
526 int remain, moff, nsize;
527
528 MBUF_CHECKSLEEP(how);
529 /* Sanity check */
530 if (m == NULL)
531 return (NULL);
532 M_ASSERTPKTHDR(m);
533
534 /* While there's more data, get a new mbuf, tack it on, and fill it */
535 remain = m->m_pkthdr.len;
536 moff = 0;
537 p = &top;
538 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
539 struct mbuf *n;
540
541 /* Get the next new mbuf */
542 if (remain >= MINCLSIZE) {
543 n = m_getcl(how, m->m_type, 0);
544 nsize = MCLBYTES;
545 } else {
546 n = m_get(how, m->m_type);
547 nsize = MLEN;
548 }
549 if (n == NULL)
550 goto nospace;
551
552 if (top == NULL) { /* First one, must be PKTHDR */
553 if (!m_dup_pkthdr(n, m, how)) {
554 m_free(n);
555 goto nospace;
556 }
557 nsize = MHLEN;
558 }
559 n->m_len = 0;
560
561 /* Link it into the new chain */
562 *p = n;
563 p = &n->m_next;
564
565 /* Copy data from original mbuf(s) into new mbuf */
566 while (n->m_len < nsize && m != NULL) {
567 int chunk = min(nsize - n->m_len, m->m_len - moff);
568
569 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
570 moff += chunk;
571 n->m_len += chunk;
572 remain -= chunk;
573 if (moff == m->m_len) {
574 m = m->m_next;
575 moff = 0;
576 }
577 }
578
579 /* Check correct total mbuf length */
580 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
581 ("%s: bogus m_pkthdr.len", __func__));
582 }
583 return (top);
584
585nospace:
586 m_freem(top);
587 mbstat.m_mcfail++; /* XXX: No consistency. */
588 return (NULL);
589}
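
/*
 * Example (illustrative sketch): obtain a deep, writable copy when the
 * original may share its clusters:
 *
 *    if (!M_WRITABLE(m)) {
 *            n = m_dup(m, M_DONTWAIT);
 *            if (n == NULL)
 *                    ...handle allocation failure...
 *            m_freem(m);
 *            m = n;
 *    }
 */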
590
591/*
592 * Concatenate mbuf chain n to m.
593 * Both chains must be of the same type (e.g. MT_DATA).
 594 * The m_pkthdr, if present, is not updated.
595 */
596void
597m_cat(struct mbuf *m, struct mbuf *n)
598{
599 while (m->m_next)
600 m = m->m_next;
601 while (n) {
602 if (m->m_flags & M_EXT ||
603 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
604 /* just join the two chains */
605 m->m_next = n;
606 return;
607 }
608 /* splat the data from one into the other */
609 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
610 (u_int)n->m_len);
611 m->m_len += n->m_len;
612 n = m_free(n);
613 }
614}
615
616void
617m_adj(struct mbuf *mp, int req_len)
618{
619 int len = req_len;
620 struct mbuf *m;
621 int count;
622
623 if ((m = mp) == NULL)
624 return;
625 if (len >= 0) {
626 /*
627 * Trim from head.
628 */
629 while (m != NULL && len > 0) {
630 if (m->m_len <= len) {
631 len -= m->m_len;
632 m->m_len = 0;
633 m = m->m_next;
634 } else {
635 m->m_len -= len;
636 m->m_data += len;
637 len = 0;
638 }
639 }
640 m = mp;
641 if (mp->m_flags & M_PKTHDR)
642 m->m_pkthdr.len -= (req_len - len);
643 } else {
644 /*
645 * Trim from tail. Scan the mbuf chain,
646 * calculating its length and finding the last mbuf.
647 * If the adjustment only affects this mbuf, then just
648 * adjust and return. Otherwise, rescan and truncate
649 * after the remaining size.
650 */
651 len = -len;
652 count = 0;
653 for (;;) {
654 count += m->m_len;
655 if (m->m_next == (struct mbuf *)0)
656 break;
657 m = m->m_next;
658 }
659 if (m->m_len >= len) {
660 m->m_len -= len;
661 if (mp->m_flags & M_PKTHDR)
662 mp->m_pkthdr.len -= len;
663 return;
664 }
665 count -= len;
666 if (count < 0)
667 count = 0;
668 /*
669 * Correct length for chain is "count".
670 * Find the mbuf with last data, adjust its length,
671 * and toss data from remaining mbufs on chain.
672 */
673 m = mp;
674 if (m->m_flags & M_PKTHDR)
675 m->m_pkthdr.len = count;
676 for (; m; m = m->m_next) {
677 if (m->m_len >= count) {
678 m->m_len = count;
679 break;
680 }
681 count -= m->m_len;
682 }
683 while (m->m_next)
 684 (m = m->m_next)->m_len = 0;
685 }
686}
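
/*
 * Example (illustrative sketch): a positive length trims from the head
 * of the chain, a negative one from the tail (constants here are from
 * <net/ethernet.h>):
 *
 *    m_adj(m, ETHER_HDR_LEN);     strips the link-level header
 *    m_adj(m, -ETHER_CRC_LEN);    drops a trailing CRC
 */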
687
688/*
689 * Rearange an mbuf chain so that len bytes are contiguous
690 * and in the data area of an mbuf (so that mtod and dtom
691 * will work for a structure of size len). Returns the resulting
692 * mbuf chain on success, frees it and returns null on failure.
693 * If there is room, it will add up to max_protohdr-len extra bytes to the
694 * contiguous region in an attempt to avoid being called next time.
695 */
696struct mbuf *
697m_pullup(struct mbuf *n, int len)
698{
699 struct mbuf *m;
700 int count;
701 int space;
702
703 /*
704 * If first mbuf has no cluster, and has room for len bytes
705 * without shifting current data, pullup into it,
706 * otherwise allocate a new mbuf to prepend to the chain.
707 */
708 if ((n->m_flags & M_EXT) == 0 &&
709 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
710 if (n->m_len >= len)
711 return (n);
712 m = n;
713 n = n->m_next;
714 len -= m->m_len;
715 } else {
716 if (len > MHLEN)
717 goto bad;
718 MGET(m, M_DONTWAIT, n->m_type);
719 if (m == NULL)
720 goto bad;
721 m->m_len = 0;
722 if (n->m_flags & M_PKTHDR)
723 M_MOVE_PKTHDR(m, n);
724 }
725 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
726 do {
727 count = min(min(max(len, max_protohdr), space), n->m_len);
728 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
729 (u_int)count);
730 len -= count;
731 m->m_len += count;
732 n->m_len -= count;
733 space -= count;
734 if (n->m_len)
735 n->m_data += count;
736 else
737 n = m_free(n);
738 } while (len > 0 && n);
739 if (len > 0) {
740 (void) m_free(m);
741 goto bad;
742 }
743 m->m_next = n;
744 return (m);
745bad:
746 m_freem(n);
747 mbstat.m_mpfail++; /* XXX: No consistency. */
748 return (NULL);
749}
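
/*
 * Example (illustrative sketch): the classic pullup idiom before
 * dereferencing a header via mtod().  On failure the chain has already
 * been freed:
 *
 *    if (m->m_len < sizeof(struct ip) &&
 *        (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *            return;
 *    ip = mtod(m, struct ip *);
 */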
750
751/*
752 * Partition an mbuf chain in two pieces, returning the tail --
753 * all but the first len0 bytes. In case of failure, it returns NULL and
754 * attempts to restore the chain to its original state.
755 *
756 * Note that the resulting mbufs might be read-only, because the new
757 * mbuf can end up sharing an mbuf cluster with the original mbuf if
758 * the "breaking point" happens to lie within a cluster mbuf. Use the
759 * M_WRITABLE() macro to check for this case.
760 */
761struct mbuf *
762m_split(struct mbuf *m0, int len0, int wait)
763{
764 struct mbuf *m, *n;
765 u_int len = len0, remain;
766
767 MBUF_CHECKSLEEP(wait);
768 for (m = m0; m && len > m->m_len; m = m->m_next)
769 len -= m->m_len;
770 if (m == NULL)
771 return (NULL);
772 remain = m->m_len - len;
773 if (m0->m_flags & M_PKTHDR) {
774 MGETHDR(n, wait, m0->m_type);
775 if (n == NULL)
776 return (NULL);
777 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
778 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
779 m0->m_pkthdr.len = len0;
780 if (m->m_flags & M_EXT)
781 goto extpacket;
782 if (remain > MHLEN) {
783 /* m can't be the lead packet */
784 MH_ALIGN(n, 0);
785 n->m_next = m_split(m, len, wait);
786 if (n->m_next == NULL) {
787 (void) m_free(n);
788 return (NULL);
789 } else {
790 n->m_len = 0;
791 return (n);
792 }
793 } else
794 MH_ALIGN(n, remain);
795 } else if (remain == 0) {
796 n = m->m_next;
797 m->m_next = NULL;
798 return (n);
799 } else {
800 MGET(n, wait, m->m_type);
801 if (n == NULL)
802 return (NULL);
803 M_ALIGN(n, remain);
804 }
805extpacket:
806 if (m->m_flags & M_EXT) {
807 n->m_flags |= M_EXT;
808 n->m_ext = m->m_ext;
809 MEXT_ADD_REF(m);
810 n->m_data = m->m_data + len;
811 } else {
812 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
813 }
814 n->m_len = remain;
815 m->m_len = len;
816 n->m_next = m->m_next;
817 m->m_next = NULL;
818 return (n);
819}
820/*
821 * Routine to copy from device local memory into mbufs.
 822 * Note that the `off' argument is the offset into the first mbuf of the
 823 * target chain at which to begin copying the data.
824 */
825struct mbuf *
826m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
827 void (*copy)(char *from, caddr_t to, u_int len))
828{
829 struct mbuf *m;
830 struct mbuf *top = NULL, **mp = &top;
831 int len;
832
833 if (off < 0 || off > MHLEN)
834 return (NULL);
835
836 while (totlen > 0) {
837 if (top == NULL) { /* First one, must be PKTHDR */
838 if (totlen + off >= MINCLSIZE) {
839 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
840 len = MCLBYTES;
841 } else {
842 m = m_gethdr(M_DONTWAIT, MT_DATA);
843 len = MHLEN;
844
845 /* Place initial small packet/header at end of mbuf */
846 if (m && totlen + off + max_linkhdr <= MLEN) {
847 m->m_data += max_linkhdr;
848 len -= max_linkhdr;
849 }
850 }
851 if (m == NULL)
852 return NULL;
853 m->m_pkthdr.rcvif = ifp;
854 m->m_pkthdr.len = totlen;
855 } else {
856 if (totlen + off >= MINCLSIZE) {
857 m = m_getcl(M_DONTWAIT, MT_DATA, 0);
858 len = MCLBYTES;
859 } else {
860 m = m_get(M_DONTWAIT, MT_DATA);
861 len = MLEN;
862 }
863 if (m == NULL) {
864 m_freem(top);
865 return NULL;
866 }
867 }
868 if (off) {
869 m->m_data += off;
870 len -= off;
871 off = 0;
872 }
873 m->m_len = len = min(totlen, len);
874 if (copy)
875 copy(buf, mtod(m, caddr_t), (u_int)len);
876 else
877 bcopy(buf, mtod(m, caddr_t), (u_int)len);
878 buf += len;
879 *mp = m;
880 mp = &m->m_next;
881 totlen -= len;
882 }
883 return (top);
884}
885
886/*
887 * Copy data from a buffer back into the indicated mbuf chain,
888 * starting "off" bytes from the beginning, extending the mbuf
889 * chain if necessary.
890 */
891void
892m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
893{
894 int mlen;
895 struct mbuf *m = m0, *n;
896 int totlen = 0;
897
898 if (m0 == NULL)
899 return;
900 while (off > (mlen = m->m_len)) {
901 off -= mlen;
902 totlen += mlen;
903 if (m->m_next == NULL) {
904 n = m_get(M_DONTWAIT, m->m_type);
905 if (n == NULL)
906 goto out;
907 bzero(mtod(n, caddr_t), MLEN);
908 n->m_len = min(MLEN, len + off);
909 m->m_next = n;
910 }
911 m = m->m_next;
912 }
913 while (len > 0) {
 914 mlen = min(m->m_len - off, len);
915 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
916 cp += mlen;
917 len -= mlen;
918 mlen += off;
919 off = 0;
920 totlen += mlen;
921 if (len == 0)
922 break;
923 if (m->m_next == NULL) {
924 n = m_get(M_DONTWAIT, m->m_type);
925 if (n == NULL)
926 break;
927 n->m_len = min(MLEN, len);
928 m->m_next = n;
929 }
930 m = m->m_next;
931 }
932out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
933 m->m_pkthdr.len = totlen;
934}
935
936/*
 937 * Append the specified data to the indicated mbuf chain.
938 * Extend the mbuf chain if the new data does not fit in
939 * existing space.
940 *
941 * Return 1 if able to complete the job; otherwise 0.
942 */
943int
944m_append(struct mbuf *m0, int len, c_caddr_t cp)
945{
946 struct mbuf *m, *n;
947 int remainder, space;
948
949 for (m = m0; m->m_next != NULL; m = m->m_next)
950 ;
951 remainder = len;
952 space = M_TRAILINGSPACE(m);
953 if (space > 0) {
954 /*
955 * Copy into available space.
956 */
957 if (space > remainder)
958 space = remainder;
959 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
960 m->m_len += space;
961 cp += space, remainder -= space;
962 }
963 while (remainder > 0) {
964 /*
965 * Allocate a new mbuf; could check space
966 * and allocate a cluster instead.
967 */
968 n = m_get(M_DONTWAIT, m->m_type);
969 if (n == NULL)
970 break;
971 n->m_len = min(MLEN, remainder);
972 bcopy(cp, mtod(n, caddr_t), n->m_len);
973 cp += n->m_len, remainder -= n->m_len;
974 m->m_next = n;
975 m = n;
976 }
977 if (m0->m_flags & M_PKTHDR)
978 m0->m_pkthdr.len += len - remainder;
979 return (remainder == 0);
980}
981
982/*
983 * Apply function f to the data in an mbuf chain starting "off" bytes from
984 * the beginning, continuing for "len" bytes.
985 */
986int
987m_apply(struct mbuf *m, int off, int len,
988 int (*f)(void *, void *, u_int), void *arg)
989{
990 u_int count;
991 int rval;
992
993 KASSERT(off >= 0, ("m_apply, negative off %d", off));
994 KASSERT(len >= 0, ("m_apply, negative len %d", len));
995 while (off > 0) {
996 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
997 if (off < m->m_len)
998 break;
999 off -= m->m_len;
1000 m = m->m_next;
1001 }
1002 while (len > 0) {
1003 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1004 count = min(m->m_len - off, len);
1005 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1006 if (rval)
1007 return (rval);
1008 len -= count;
1009 off = 0;
1010 m = m->m_next;
1011 }
1012 return (0);
1013}
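
/*
 * Example (illustrative sketch): m_apply() hands each contiguous region
 * to the callback once, so a simple byte sum (sum_bytes() below is a
 * hypothetical helper) needs no per-mbuf bookkeeping in the caller:
 *
 *    static int
 *    sum_bytes(void *arg, void *data, u_int len)
 *    {
 *            u_int *sum = arg;
 *            u_char *p = data;
 *
 *            while (len-- > 0)
 *                    *sum += *p++;
 *            return (0);
 *    }
 *
 *    u_int sum = 0;
 *    (void)m_apply(m, off, len, sum_bytes, &sum);
 */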
1014
1015/*
1016 * Return a pointer to mbuf/offset of location in mbuf chain.
1017 */
1018struct mbuf *
1019m_getptr(struct mbuf *m, int loc, int *off)
1020{
1021
1022 while (loc >= 0) {
1023 /* Normal end of search. */
1024 if (m->m_len > loc) {
1025 *off = loc;
1026 return (m);
1027 } else {
1028 loc -= m->m_len;
1029 if (m->m_next == NULL) {
1030 if (loc == 0) {
1031 /* Point at the end of valid data. */
1032 *off = m->m_len;
1033 return (m);
1034 }
1035 return (NULL);
1036 }
1037 m = m->m_next;
1038 }
1039 }
1040 return (NULL);
1041}
1042
1043void
1044m_print(const struct mbuf *m, int maxlen)
1045{
1046 int len;
1047 int pdata;
1048 const struct mbuf *m2;
1049
1050 if (m->m_flags & M_PKTHDR)
1051 len = m->m_pkthdr.len;
1052 else
1053 len = -1;
1054 m2 = m;
1055 while (m2 != NULL && (len == -1 || len)) {
1056 pdata = m2->m_len;
1057 if (maxlen != -1 && pdata > maxlen)
1058 pdata = maxlen;
1059 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1060 m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1061 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1062 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1063 if (pdata)
1064 printf(", %*D\n", m2->m_len, (u_char *)m2->m_data, "-");
1065 if (len != -1)
1066 len -= m2->m_len;
1067 m2 = m2->m_next;
1068 }
1069 if (len > 0)
1070 printf("%d bytes unaccounted for.\n", len);
1071 return;
1072}
1073
1074u_int
1075m_fixhdr(struct mbuf *m0)
1076{
1077 u_int len;
1078
1079 len = m_length(m0, NULL);
1080 m0->m_pkthdr.len = len;
1081 return (len);
1082}
1083
1084u_int
1085m_length(struct mbuf *m0, struct mbuf **last)
1086{
1087 struct mbuf *m;
1088 u_int len;
1089
1090 len = 0;
1091 for (m = m0; m != NULL; m = m->m_next) {
1092 len += m->m_len;
1093 if (m->m_next == NULL)
1094 break;
1095 }
1096 if (last != NULL)
1097 *last = m;
1098 return (len);
1099}
1100
1101/*
 1102 * Defragment an mbuf chain, returning the shortest possible
1103 * chain of mbufs and clusters. If allocation fails and
1104 * this cannot be completed, NULL will be returned, but
1105 * the passed in chain will be unchanged. Upon success,
1106 * the original chain will be freed, and the new chain
1107 * will be returned.
1108 *
 1109 * If a chain without a packet header is passed in, the original
 1110 * mbuf chain will be returned unharmed.
1111 */
1112struct mbuf *
1113m_defrag(struct mbuf *m0, int how)
1114{
1115 struct mbuf *m_new = NULL, *m_final = NULL;
1116 int progress = 0, length;
1117
1118 MBUF_CHECKSLEEP(how);
1119 if (!(m0->m_flags & M_PKTHDR))
1120 return (m0);
1121
1122 m_fixhdr(m0); /* Needed sanity check */
1123
1124#ifdef MBUF_STRESS_TEST
1125 if (m_defragrandomfailures) {
1126 int temp = arc4random() & 0xff;
1127 if (temp == 0xba)
1128 goto nospace;
1129 }
1130#endif
1131
1132 if (m0->m_pkthdr.len > MHLEN)
1133 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1134 else
1135 m_final = m_gethdr(how, MT_DATA);
1136
1137 if (m_final == NULL)
1138 goto nospace;
1139
1140 if (m_dup_pkthdr(m_final, m0, how) == 0)
1141 goto nospace;
1142
1143 m_new = m_final;
1144
1145 while (progress < m0->m_pkthdr.len) {
1146 length = m0->m_pkthdr.len - progress;
1147 if (length > MCLBYTES)
1148 length = MCLBYTES;
1149
1150 if (m_new == NULL) {
1151 if (length > MLEN)
1152 m_new = m_getcl(how, MT_DATA, 0);
1153 else
1154 m_new = m_get(how, MT_DATA);
1155 if (m_new == NULL)
1156 goto nospace;
1157 }
1158
1159 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1160 progress += length;
1161 m_new->m_len = length;
1162 if (m_new != m_final)
1163 m_cat(m_final, m_new);
1164 m_new = NULL;
1165 }
1166#ifdef MBUF_STRESS_TEST
1167 if (m0->m_next == NULL)
1168 m_defraguseless++;
1169#endif
1170 m_freem(m0);
1171 m0 = m_final;
1172#ifdef MBUF_STRESS_TEST
1173 m_defragpackets++;
1174 m_defragbytes += m0->m_pkthdr.len;
1175#endif
1176 return (m0);
1177nospace:
1178#ifdef MBUF_STRESS_TEST
1179 m_defragfailure++;
1180#endif
1181 if (m_new)
1182 m_free(m_new);
1183 if (m_final)
1184 m_freem(m_final);
1185 return (NULL);
1186}
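
/*
 * Example (illustrative sketch): a driver with a short scatter/gather
 * list may defragment before DMA.  On failure the original chain is
 * still intact, so the caller chooses whether to drop it:
 *
 *    n = m_defrag(m, M_DONTWAIT);
 *    if (n == NULL)
 *            m_freem(m);
 *    else
 *            m = n;
 */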
1187
1188#ifdef MBUF_STRESS_TEST
1189
1190/*
1191 * Fragment an mbuf chain. There's no reason you'd ever want to do
1192 * this in normal usage, but it's great for stress testing various
1193 * mbuf consumers.
1194 *
1195 * If fragmentation is not possible, the original chain will be
1196 * returned.
1197 *
1198 * Possible length values:
1199 * 0 no fragmentation will occur
1200 * > 0 each fragment will be of the specified length
1201 * -1 each fragment will be the same random value in length
1202 * -2 each fragment's length will be entirely random
1203 * (Random values range from 1 to 256)
1204 */
1205struct mbuf *
1206m_fragment(struct mbuf *m0, int how, int length)
1207{
1208 struct mbuf *m_new = NULL, *m_final = NULL;
1209 int progress = 0;
1210
1211 if (!(m0->m_flags & M_PKTHDR))
1212 return (m0);
1213
1214 if ((length == 0) || (length < -2))
1215 return (m0);
1216
1217 m_fixhdr(m0); /* Needed sanity check */
1218
1219 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1220
1221 if (m_final == NULL)
1222 goto nospace;
1223
1224 if (m_dup_pkthdr(m_final, m0, how) == 0)
1225 goto nospace;
1226
1227 m_new = m_final;
1228
1229 if (length == -1)
1230 length = 1 + (arc4random() & 255);
1231
1232 while (progress < m0->m_pkthdr.len) {
1233 int fraglen;
1234
1235 if (length > 0)
1236 fraglen = length;
1237 else
1238 fraglen = 1 + (arc4random() & 255);
1239 if (fraglen > m0->m_pkthdr.len - progress)
1240 fraglen = m0->m_pkthdr.len - progress;
1241
1242 if (fraglen > MCLBYTES)
1243 fraglen = MCLBYTES;
1244
1245 if (m_new == NULL) {
1246 m_new = m_getcl(how, MT_DATA, 0);
1247 if (m_new == NULL)
1248 goto nospace;
1249 }
1250
1251 m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1252 progress += fraglen;
1253 m_new->m_len = fraglen;
1254 if (m_new != m_final)
1255 m_cat(m_final, m_new);
1256 m_new = NULL;
1257 }
1258 m_freem(m0);
1259 m0 = m_final;
1260 return (m0);
1261nospace:
1262 if (m_new)
1263 m_free(m_new);
1264 if (m_final)
1265 m_freem(m_final);
1266 /* Return the original chain on failure */
1267 return (m0);
1268}
1269
1270#endif
1271
1272struct mbuf *
1273m_uiotombuf(struct uio *uio, int how, int len)
1274{
1275 struct mbuf *m_new = NULL, *m_final = NULL;
1276 int progress = 0, error = 0, length, total;
1277
1278 if (len > 0)
1279 total = min(uio->uio_resid, len);
1280 else
1281 total = uio->uio_resid;
1282 if (total > MHLEN)
1283 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1284 else
1285 m_final = m_gethdr(how, MT_DATA);
1286 if (m_final == NULL)
1287 goto nospace;
1288 m_new = m_final;
1289 while (progress < total) {
1290 length = total - progress;
1291 if (length > MCLBYTES)
1292 length = MCLBYTES;
1293 if (m_new == NULL) {
1294 if (length > MLEN)
1295 m_new = m_getcl(how, MT_DATA, 0);
1296 else
1297 m_new = m_get(how, MT_DATA);
1298 if (m_new == NULL)
1299 goto nospace;
1300 }
1301 error = uiomove(mtod(m_new, void *), length, uio);
1302 if (error)
1303 goto nospace;
1304 progress += length;
1305 m_new->m_len = length;
1306 if (m_new != m_final)
1307 m_cat(m_final, m_new);
1308 m_new = NULL;
1309 }
1310 m_fixhdr(m_final);
1311 return (m_final);
1312nospace:
1313 if (m_new)
1314 m_free(m_new);
1315 if (m_final)
1316 m_freem(m_final);
1317 return (NULL);
1318}
92
93/*
94 * Allocate a given length worth of mbufs and/or clusters (whatever fits
95 * best) and return a pointer to the top of the allocated chain. If an
96 * existing mbuf chain is provided, then we will append the new chain
97 * to the existing one but still return the top of the newly allocated
98 * chain.
99 */
100struct mbuf *
101m_getm(struct mbuf *m, int len, int how, short type)
102{
103 struct mbuf *mb, *top, *cur, *mtail;
104 int num, rem;
105 int i;
106
107 KASSERT(len >= 0, ("m_getm(): len is < 0"));
108
109 /* If m != NULL, we will append to the end of that chain. */
110 if (m != NULL)
111 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
112 else
113 mtail = NULL;
114
115 /*
116 * Calculate how many mbufs+clusters ("packets") we need and how much
117 * leftover there is after that and allocate the first mbuf+cluster
118 * if required.
119 */
120 num = len / MCLBYTES;
121 rem = len % MCLBYTES;
122 top = cur = NULL;
123 if (num > 0) {
124 if ((top = cur = m_getcl(how, type, 0)) == NULL)
125 goto failed;
126 top->m_len = 0;
127 }
128 num--;
129
130 for (i = 0; i < num; i++) {
131 mb = m_getcl(how, type, 0);
132 if (mb == NULL)
133 goto failed;
134 mb->m_len = 0;
135 cur = (cur->m_next = mb);
136 }
137 if (rem > 0) {
138 mb = (rem > MINCLSIZE) ?
139 m_getcl(how, type, 0) : m_get(how, type);
140 if (mb == NULL)
141 goto failed;
142 mb->m_len = 0;
143 if (cur == NULL)
144 top = mb;
145 else
146 cur->m_next = mb;
147 }
148
149 if (mtail != NULL)
150 mtail->m_next = top;
151 return top;
152failed:
153 if (top != NULL)
154 m_freem(top);
155 return NULL;
156}
157
158/*
159 * Free an entire chain of mbufs and associated external buffers, if
160 * applicable.
161 */
162void
163m_freem(struct mbuf *mb)
164{
165
166 while (mb != NULL)
167 mb = m_free(mb);
168}
169
170/*-
171 * Configure a provided mbuf to refer to the provided external storage
172 * buffer and setup a reference count for said buffer. If the setting
173 * up of the reference count fails, the M_EXT bit will not be set. If
174 * successfull, the M_EXT bit is set in the mbuf's flags.
175 *
176 * Arguments:
177 * mb The existing mbuf to which to attach the provided buffer.
178 * buf The address of the provided external storage buffer.
179 * size The size of the provided buffer.
180 * freef A pointer to a routine that is responsible for freeing the
181 * provided external storage buffer.
182 * args A pointer to an argument structure (of any type) to be passed
183 * to the provided freef routine (may be NULL).
184 * flags Any other flags to be passed to the provided mbuf.
185 * type The type that the external storage buffer should be
186 * labeled with.
187 *
188 * Returns:
189 * Nothing.
190 */
191void
192m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
193 void (*freef)(void *, void *), void *args, int flags, int type)
194{
195 u_int *ref_cnt = NULL;
196
197 /* XXX Shouldn't be adding EXT_CLUSTER with this API */
198 if (type == EXT_CLUSTER)
199 ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
200 mb->m_ext.ext_buf);
201 else if (type == EXT_EXTREF)
202 ref_cnt = mb->m_ext.ref_cnt;
203 mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
204 malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
205 if (mb->m_ext.ref_cnt != NULL) {
206 *(mb->m_ext.ref_cnt) = 1;
207 mb->m_flags |= (M_EXT | flags);
208 mb->m_ext.ext_buf = buf;
209 mb->m_data = mb->m_ext.ext_buf;
210 mb->m_ext.ext_size = size;
211 mb->m_ext.ext_free = freef;
212 mb->m_ext.ext_args = args;
213 mb->m_ext.ext_type = type;
214 }
215}
216
217/*
218 * Non-directly-exported function to clean up after mbufs with M_EXT
219 * storage attached to them if the reference count hits 0.
220 */
221void
222mb_free_ext(struct mbuf *m)
223{
224 u_int cnt;
225
226 /*
227 * This is tricky. We need to make sure to decrement the
228 * refcount in a safe way but to also clean up if we're the
229 * last reference. This method seems to do it without race.
230 */
231 do {
232 cnt = *(m->m_ext.ref_cnt);
233 if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
234 if (cnt == 1) {
235 /*
236 * Do the free, should be safe.
237 */
238 if (m->m_ext.ext_type == EXT_PACKET) {
239 uma_zfree(zone_pack, m);
240 return;
241 } else if (m->m_ext.ext_type == EXT_CLUSTER) {
242 uma_zfree(zone_clust, m->m_ext.ext_buf);
243 m->m_ext.ext_buf = NULL;
244 } else {
245 (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
246 m->m_ext.ext_args);
247 if (m->m_ext.ext_type != EXT_EXTREF)
248 free(m->m_ext.ref_cnt, M_MBUF);
249 m->m_ext.ext_buf = NULL;
250 }
251 }
252 /* Decrement (and potentially free) done, safely. */
253 break;
254 }
255 } while (1);
256 uma_zfree(zone_mbuf, m);
257}
258
259/*
260 * "Move" mbuf pkthdr from "from" to "to".
261 * "from" must have M_PKTHDR set, and "to" must be empty.
262 */
263void
264m_move_pkthdr(struct mbuf *to, struct mbuf *from)
265{
266
267#if 0
268 /* see below for why these are not enabled */
269 M_ASSERTPKTHDR(to);
270 /* Note: with MAC, this may not be a good assertion. */
271 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
272 ("m_move_pkthdr: to has tags"));
273#endif
274 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
275#ifdef MAC
276 /*
277 * XXXMAC: It could be this should also occur for non-MAC?
278 */
279 if (to->m_flags & M_PKTHDR)
280 m_tag_delete_chain(to, NULL);
281#endif
282 to->m_flags = from->m_flags & M_COPYFLAGS;
283 to->m_data = to->m_pktdat;
284 to->m_pkthdr = from->m_pkthdr; /* especially tags */
285 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
286 from->m_flags &= ~M_PKTHDR;
287}
288
289/*
290 * Duplicate "from"'s mbuf pkthdr in "to".
291 * "from" must have M_PKTHDR set, and "to" must be empty.
292 * In particular, this does a deep copy of the packet tags.
293 */
294int
295m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
296{
297
298#if 0
299 /*
300 * The mbuf allocator only initializes the pkthdr
301 * when the mbuf is allocated with MGETHDR. Many users
302 * (e.g. m_copy*, m_prepend) use MGET and then
303 * smash the pkthdr as needed causing these
304 * assertions to trip. For now just disable them.
305 */
306 M_ASSERTPKTHDR(to);
307 /* Note: with MAC, this may not be a good assertion. */
308 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
309#endif
310 MBUF_CHECKSLEEP(how);
311#ifdef MAC
312 if (to->m_flags & M_PKTHDR)
313 m_tag_delete_chain(to, NULL);
314#endif
315 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
316 if ((to->m_flags & M_EXT) == 0)
317 to->m_data = to->m_pktdat;
318 to->m_pkthdr = from->m_pkthdr;
319 SLIST_INIT(&to->m_pkthdr.tags);
320 return (m_tag_copy_chain(to, from, MBTOM(how)));
321}
322
323/*
324 * Lesser-used path for M_PREPEND:
325 * allocate new mbuf to prepend to chain,
326 * copy junk along.
327 */
328struct mbuf *
329m_prepend(struct mbuf *m, int len, int how)
330{
331 struct mbuf *mn;
332
333 if (m->m_flags & M_PKTHDR)
334 MGETHDR(mn, how, m->m_type);
335 else
336 MGET(mn, how, m->m_type);
337 if (mn == NULL) {
338 m_freem(m);
339 return (NULL);
340 }
341 if (m->m_flags & M_PKTHDR)
342 M_MOVE_PKTHDR(mn, m);
343 mn->m_next = m;
344 m = mn;
345 if (len < MHLEN)
346 MH_ALIGN(m, len);
347 m->m_len = len;
348 return (m);
349}
350
351/*
352 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
353 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
354 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
355 * Note that the copy is read-only, because clusters are not copied,
356 * only their reference counts are incremented.
357 */
358struct mbuf *
359m_copym(struct mbuf *m, int off0, int len, int wait)
360{
361 struct mbuf *n, **np;
362 int off = off0;
363 struct mbuf *top;
364 int copyhdr = 0;
365
366 KASSERT(off >= 0, ("m_copym, negative off %d", off));
367 KASSERT(len >= 0, ("m_copym, negative len %d", len));
368 MBUF_CHECKSLEEP(wait);
369 if (off == 0 && m->m_flags & M_PKTHDR)
370 copyhdr = 1;
371 while (off > 0) {
372 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
373 if (off < m->m_len)
374 break;
375 off -= m->m_len;
376 m = m->m_next;
377 }
378 np = &top;
379 top = 0;
380 while (len > 0) {
381 if (m == NULL) {
382 KASSERT(len == M_COPYALL,
383 ("m_copym, length > size of mbuf chain"));
384 break;
385 }
386 if (copyhdr)
387 MGETHDR(n, wait, m->m_type);
388 else
389 MGET(n, wait, m->m_type);
390 *np = n;
391 if (n == NULL)
392 goto nospace;
393 if (copyhdr) {
394 if (!m_dup_pkthdr(n, m, wait))
395 goto nospace;
396 if (len == M_COPYALL)
397 n->m_pkthdr.len -= off0;
398 else
399 n->m_pkthdr.len = len;
400 copyhdr = 0;
401 }
402 n->m_len = min(len, m->m_len - off);
403 if (m->m_flags & M_EXT) {
404 n->m_data = m->m_data + off;
405 n->m_ext = m->m_ext;
406 n->m_flags |= M_EXT;
407 MEXT_ADD_REF(m);
408 } else
409 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
410 (u_int)n->m_len);
411 if (len != M_COPYALL)
412 len -= n->m_len;
413 off = 0;
414 m = m->m_next;
415 np = &n->m_next;
416 }
417 if (top == NULL)
418 mbstat.m_mcfail++; /* XXX: No consistency. */
419
420 return (top);
421nospace:
422 m_freem(top);
423 mbstat.m_mcfail++; /* XXX: No consistency. */
424 return (NULL);
425}
426
427/*
428 * Copy an entire packet, including header (which must be present).
429 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
430 * Note that the copy is read-only, because clusters are not copied,
431 * only their reference counts are incremented.
432 * Preserve alignment of the first mbuf so if the creator has left
433 * some room at the beginning (e.g. for inserting protocol headers)
434 * the copies still have the room available.
435 */
436struct mbuf *
437m_copypacket(struct mbuf *m, int how)
438{
439 struct mbuf *top, *n, *o;
440
441 MBUF_CHECKSLEEP(how);
442 MGET(n, how, m->m_type);
443 top = n;
444 if (n == NULL)
445 goto nospace;
446
447 if (!m_dup_pkthdr(n, m, how))
448 goto nospace;
449 n->m_len = m->m_len;
450 if (m->m_flags & M_EXT) {
451 n->m_data = m->m_data;
452 n->m_ext = m->m_ext;
453 n->m_flags |= M_EXT;
454 MEXT_ADD_REF(m);
455 } else {
456 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
457 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
458 }
459
460 m = m->m_next;
461 while (m) {
462 MGET(o, how, m->m_type);
463 if (o == NULL)
464 goto nospace;
465
466 n->m_next = o;
467 n = n->m_next;
468
469 n->m_len = m->m_len;
470 if (m->m_flags & M_EXT) {
471 n->m_data = m->m_data;
472 n->m_ext = m->m_ext;
473 n->m_flags |= M_EXT;
474 MEXT_ADD_REF(m);
475 } else {
476 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
477 }
478
479 m = m->m_next;
480 }
481 return top;
482nospace:
483 m_freem(top);
484 mbstat.m_mcfail++; /* XXX: No consistency. */
485 return (NULL);
486}
487
488/*
489 * Copy data from an mbuf chain starting "off" bytes from the beginning,
490 * continuing for "len" bytes, into the indicated buffer.
491 */
492void
493m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
494{
495 u_int count;
496
497 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
498 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
499 while (off > 0) {
500 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
501 if (off < m->m_len)
502 break;
503 off -= m->m_len;
504 m = m->m_next;
505 }
506 while (len > 0) {
507 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
508 count = min(m->m_len - off, len);
509 bcopy(mtod(m, caddr_t) + off, cp, count);
510 len -= count;
511 cp += count;
512 off = 0;
513 m = m->m_next;
514 }
515}
516
517/*
518 * Copy a packet header mbuf chain into a completely new chain, including
519 * copying any mbuf clusters. Use this instead of m_copypacket() when
520 * you need a writable copy of an mbuf chain.
521 */
522struct mbuf *
523m_dup(struct mbuf *m, int how)
524{
525 struct mbuf **p, *top = NULL;
526 int remain, moff, nsize;
527
528 MBUF_CHECKSLEEP(how);
529 /* Sanity check */
530 if (m == NULL)
531 return (NULL);
532 M_ASSERTPKTHDR(m);
533
534 /* While there's more data, get a new mbuf, tack it on, and fill it */
535 remain = m->m_pkthdr.len;
536 moff = 0;
537 p = &top;
538 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
539 struct mbuf *n;
540
541 /* Get the next new mbuf */
542 if (remain >= MINCLSIZE) {
543 n = m_getcl(how, m->m_type, 0);
544 nsize = MCLBYTES;
545 } else {
546 n = m_get(how, m->m_type);
547 nsize = MLEN;
548 }
549 if (n == NULL)
550 goto nospace;
551
552 if (top == NULL) { /* First one, must be PKTHDR */
553 if (!m_dup_pkthdr(n, m, how)) {
554 m_free(n);
555 goto nospace;
556 }
557 nsize = MHLEN;
558 }
559 n->m_len = 0;
560
561 /* Link it into the new chain */
562 *p = n;
563 p = &n->m_next;
564
565 /* Copy data from original mbuf(s) into new mbuf */
566 while (n->m_len < nsize && m != NULL) {
567 int chunk = min(nsize - n->m_len, m->m_len - moff);
568
569 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
570 moff += chunk;
571 n->m_len += chunk;
572 remain -= chunk;
573 if (moff == m->m_len) {
574 m = m->m_next;
575 moff = 0;
576 }
577 }
578
579 /* Check correct total mbuf length */
580 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
581 ("%s: bogus m_pkthdr.len", __func__));
582 }
583 return (top);
584
585nospace:
586 m_freem(top);
587 mbstat.m_mcfail++; /* XXX: No consistency. */
588 return (NULL);
589}
590
591/*
592 * Concatenate mbuf chain n to m.
593 * Both chains must be of the same type (e.g. MT_DATA).
594 * Any m_pkthdr is not updated.
595 */
596void
597m_cat(struct mbuf *m, struct mbuf *n)
598{
599 while (m->m_next)
600 m = m->m_next;
601 while (n) {
602 if (m->m_flags & M_EXT ||
603 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
604 /* just join the two chains */
605 m->m_next = n;
606 return;
607 }
608 /* splat the data from one into the other */
609 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
610 (u_int)n->m_len);
611 m->m_len += n->m_len;
612 n = m_free(n);
613 }
614}
615
616void
617m_adj(struct mbuf *mp, int req_len)
618{
619 int len = req_len;
620 struct mbuf *m;
621 int count;
622
623 if ((m = mp) == NULL)
624 return;
625 if (len >= 0) {
626 /*
627 * Trim from head.
628 */
629 while (m != NULL && len > 0) {
630 if (m->m_len <= len) {
631 len -= m->m_len;
632 m->m_len = 0;
633 m = m->m_next;
634 } else {
635 m->m_len -= len;
636 m->m_data += len;
637 len = 0;
638 }
639 }
640 m = mp;
641 if (mp->m_flags & M_PKTHDR)
642 m->m_pkthdr.len -= (req_len - len);
643 } else {
644 /*
645 * Trim from tail. Scan the mbuf chain,
646 * calculating its length and finding the last mbuf.
647 * If the adjustment only affects this mbuf, then just
648 * adjust and return. Otherwise, rescan and truncate
649 * after the remaining size.
650 */
651 len = -len;
652 count = 0;
653 for (;;) {
654 count += m->m_len;
655 if (m->m_next == (struct mbuf *)0)
656 break;
657 m = m->m_next;
658 }
659 if (m->m_len >= len) {
660 m->m_len -= len;
661 if (mp->m_flags & M_PKTHDR)
662 mp->m_pkthdr.len -= len;
663 return;
664 }
665 count -= len;
666 if (count < 0)
667 count = 0;
668 /*
669 * Correct length for chain is "count".
670 * Find the mbuf with last data, adjust its length,
671 * and toss data from remaining mbufs on chain.
672 */
673 m = mp;
674 if (m->m_flags & M_PKTHDR)
675 m->m_pkthdr.len = count;
676 for (; m; m = m->m_next) {
677 if (m->m_len >= count) {
678 m->m_len = count;
679 break;
680 }
681 count -= m->m_len;
682 }
683 while (m->m_next)
684 (m = m->m_next) ->m_len = 0;
685 }
686}
687
688/*
689 * Rearange an mbuf chain so that len bytes are contiguous
690 * and in the data area of an mbuf (so that mtod and dtom
691 * will work for a structure of size len). Returns the resulting
692 * mbuf chain on success, frees it and returns null on failure.
693 * If there is room, it will add up to max_protohdr-len extra bytes to the
694 * contiguous region in an attempt to avoid being called next time.
695 */
696struct mbuf *
697m_pullup(struct mbuf *n, int len)
698{
699 struct mbuf *m;
700 int count;
701 int space;
702
703 /*
704 * If first mbuf has no cluster, and has room for len bytes
705 * without shifting current data, pullup into it,
706 * otherwise allocate a new mbuf to prepend to the chain.
707 */
708 if ((n->m_flags & M_EXT) == 0 &&
709 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
710 if (n->m_len >= len)
711 return (n);
712 m = n;
713 n = n->m_next;
714 len -= m->m_len;
715 } else {
716 if (len > MHLEN)
717 goto bad;
718 MGET(m, M_DONTWAIT, n->m_type);
719 if (m == NULL)
720 goto bad;
721 m->m_len = 0;
722 if (n->m_flags & M_PKTHDR)
723 M_MOVE_PKTHDR(m, n);
724 }
725 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
726 do {
727 count = min(min(max(len, max_protohdr), space), n->m_len);
728 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
729 (u_int)count);
730 len -= count;
731 m->m_len += count;
732 n->m_len -= count;
733 space -= count;
734 if (n->m_len)
735 n->m_data += count;
736 else
737 n = m_free(n);
738 } while (len > 0 && n);
739 if (len > 0) {
740 (void) m_free(m);
741 goto bad;
742 }
743 m->m_next = n;
744 return (m);
745bad:
746 m_freem(n);
747 mbstat.m_mpfail++; /* XXX: No consistency. */
748 return (NULL);
749}
750
751/*
752 * Partition an mbuf chain in two pieces, returning the tail --
753 * all but the first len0 bytes. In case of failure, it returns NULL and
754 * attempts to restore the chain to its original state.
755 *
756 * Note that the resulting mbufs might be read-only, because the new
757 * mbuf can end up sharing an mbuf cluster with the original mbuf if
758 * the "breaking point" happens to lie within a cluster mbuf. Use the
759 * M_WRITABLE() macro to check for this case.
760 */
761struct mbuf *
762m_split(struct mbuf *m0, int len0, int wait)
763{
764 struct mbuf *m, *n;
765 u_int len = len0, remain;
766
767 MBUF_CHECKSLEEP(wait);
768 for (m = m0; m && len > m->m_len; m = m->m_next)
769 len -= m->m_len;
770 if (m == NULL)
771 return (NULL);
772 remain = m->m_len - len;
773 if (m0->m_flags & M_PKTHDR) {
774 MGETHDR(n, wait, m0->m_type);
775 if (n == NULL)
776 return (NULL);
777 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
778 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
779 m0->m_pkthdr.len = len0;
780 if (m->m_flags & M_EXT)
781 goto extpacket;
782 if (remain > MHLEN) {
783 /* m can't be the lead packet */
784 MH_ALIGN(n, 0);
785 n->m_next = m_split(m, len, wait);
786 if (n->m_next == NULL) {
787 (void) m_free(n);
788 return (NULL);
789 } else {
790 n->m_len = 0;
791 return (n);
792 }
793 } else
794 MH_ALIGN(n, remain);
795 } else if (remain == 0) {
796 n = m->m_next;
797 m->m_next = NULL;
798 return (n);
799 } else {
800 MGET(n, wait, m->m_type);
801 if (n == NULL)
802 return (NULL);
803 M_ALIGN(n, remain);
804 }
805extpacket:
806 if (m->m_flags & M_EXT) {
807 n->m_flags |= M_EXT;
808 n->m_ext = m->m_ext;
809 MEXT_ADD_REF(m);
810 n->m_data = m->m_data + len;
811 } else {
812 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
813 }
814 n->m_len = remain;
815 m->m_len = len;
816 n->m_next = m->m_next;
817 m->m_next = NULL;
818 return (n);
819}
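/*
 * Usage sketch (illustrative; the names are placeholders): splitting a
 * packet at an arbitrary byte boundary, remembering that the tail may
 * share a cluster with the head and therefore be read-only:
 *
 *	tail = m_split(m0, hdrlen, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	(m0 was left intact)
 *	(m0 now holds the first hdrlen bytes, tail holds the remainder;
 *	 check M_WRITABLE(tail) before modifying its data in place)
 */
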
820/*
821 * Routine to copy from device local memory into mbufs.
822 * Note that the `off' argument is the offset into the first mbuf of
823 * the target chain at which to begin copying the data.
824 */
825struct mbuf *
826m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
827 void (*copy)(char *from, caddr_t to, u_int len))
828{
829 struct mbuf *m;
830 struct mbuf *top = NULL, **mp = &top;
831 int len;
832
833 if (off < 0 || off > MHLEN)
834 return (NULL);
835
836 while (totlen > 0) {
837 if (top == NULL) { /* First one, must be PKTHDR */
838 if (totlen + off >= MINCLSIZE) {
839 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
840 len = MCLBYTES;
841 } else {
842 m = m_gethdr(M_DONTWAIT, MT_DATA);
843 len = MHLEN;
844
845 /* Place initial small packet/header at end of mbuf */
846 if (m && totlen + off + max_linkhdr <= MLEN) {
847 m->m_data += max_linkhdr;
848 len -= max_linkhdr;
849 }
850 }
851 if (m == NULL)
852 return (NULL);
853 m->m_pkthdr.rcvif = ifp;
854 m->m_pkthdr.len = totlen;
855 } else {
856 if (totlen + off >= MINCLSIZE) {
857 m = m_getcl(M_DONTWAIT, MT_DATA, 0);
858 len = MCLBYTES;
859 } else {
860 m = m_get(M_DONTWAIT, MT_DATA);
861 len = MLEN;
862 }
863 if (m == NULL) {
864 m_freem(top);
865 return (NULL);
866 }
867 }
868 if (off) {
869 m->m_data += off;
870 len -= off;
871 off = 0;
872 }
873 m->m_len = len = min(totlen, len);
874 if (copy)
875 copy(buf, mtod(m, caddr_t), (u_int)len);
876 else
877 bcopy(buf, mtod(m, caddr_t), (u_int)len);
878 buf += len;
879 *mp = m;
880 mp = &m->m_next;
881 totlen -= len;
882 }
883 return (top);
884}
885
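/*
 * Usage sketch (illustrative; "sc" and "dev_buf" are hypothetical): an
 * old-style driver copies a received frame out of board memory and hands
 * the resulting chain to the stack:
 *
 *	m = m_devget(dev_buf, pkt_len, 0, sc->ifp, NULL);
 *	if (m != NULL)
 *		(*sc->ifp->if_input)(sc->ifp, m);
 *
 * Passing a NULL copy function falls back on bcopy(); a device whose
 * memory needs special access cycles supplies its own copy routine.
 */
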
886/*
887 * Copy data from a buffer back into the indicated mbuf chain,
888 * starting "off" bytes from the beginning, extending the mbuf
889 * chain if necessary.
890 */
891void
892m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
893{
894 int mlen;
895 struct mbuf *m = m0, *n;
896 int totlen = 0;
897
898 if (m0 == NULL)
899 return;
900 while (off > (mlen = m->m_len)) {
901 off -= mlen;
902 totlen += mlen;
903 if (m->m_next == NULL) {
904 n = m_get(M_DONTWAIT, m->m_type);
905 if (n == NULL)
906 goto out;
907 bzero(mtod(n, caddr_t), MLEN);
908 n->m_len = min(MLEN, len + off);
909 m->m_next = n;
910 }
911 m = m->m_next;
912 }
913 while (len > 0) {
914 mlen = min(m->m_len - off, len);
915 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
916 cp += mlen;
917 len -= mlen;
918 mlen += off;
919 off = 0;
920 totlen += mlen;
921 if (len == 0)
922 break;
923 if (m->m_next == NULL) {
924 n = m_get(M_DONTWAIT, m->m_type);
925 if (n == NULL)
926 break;
927 n->m_len = min(MLEN, len);
928 m->m_next = n;
929 }
930 m = m->m_next;
931 }
932out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
933 m->m_pkthdr.len = totlen;
934}
935
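/*
 * Usage sketch (illustrative; "ckoff" and "sum" are placeholders):
 * overwriting a 16-bit checksum that lives "ckoff" bytes into the chain:
 *
 *	u_int16_t sum;
 *	...
 *	m_copyback(m, ckoff, sizeof(sum), (c_caddr_t)&sum);
 *
 * Note that extension mbufs are allocated with M_DONTWAIT, so under
 * memory pressure the copy may silently stop short; callers that must
 * know the data landed should verify the chain length afterwards.
 */
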
936/*
937 * Append the specified data to the indicated mbuf chain,
938 * extending the mbuf chain if the new data does not fit in
939 * existing space.
940 *
941 * Return 1 if able to complete the job; otherwise 0.
942 */
943int
944m_append(struct mbuf *m0, int len, c_caddr_t cp)
945{
946 struct mbuf *m, *n;
947 int remainder, space;
948
949 for (m = m0; m->m_next != NULL; m = m->m_next)
950 ;
951 remainder = len;
952 space = M_TRAILINGSPACE(m);
953 if (space > 0) {
954 /*
955 * Copy into available space.
956 */
957 if (space > remainder)
958 space = remainder;
959 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
960 m->m_len += space;
961 cp += space, remainder -= space;
962 }
963 while (remainder > 0) {
964 /*
965 * Allocate a new mbuf; could check space
966 * and allocate a cluster instead.
967 */
968 n = m_get(M_DONTWAIT, m->m_type);
969 if (n == NULL)
970 break;
971 n->m_len = min(MLEN, remainder);
972 bcopy(cp, mtod(n, caddr_t), n->m_len);
973 cp += n->m_len, remainder -= n->m_len;
974 m->m_next = n;
975 m = n;
976 }
977 if (m0->m_flags & M_PKTHDR)
978 m0->m_pkthdr.len += len - remainder;
979 return (remainder == 0);
980}
981
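/*
 * Usage sketch (illustrative; "trailer" is a placeholder): appending a
 * fixed-size trailer and checking the return value, since allocation of
 * the extra mbufs can fail partway through:
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer))
 *		return (ENOBUFS);	(a partial append may have occurred)
 */
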
982/*
983 * Apply function f to the data in an mbuf chain starting "off" bytes from
984 * the beginning, continuing for "len" bytes.
985 */
986int
987m_apply(struct mbuf *m, int off, int len,
988 int (*f)(void *, void *, u_int), void *arg)
989{
990 u_int count;
991 int rval;
992
993 KASSERT(off >= 0, ("m_apply, negative off %d", off));
994 KASSERT(len >= 0, ("m_apply, negative len %d", len));
995 while (off > 0) {
996 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
997 if (off < m->m_len)
998 break;
999 off -= m->m_len;
1000 m = m->m_next;
1001 }
1002 while (len > 0) {
1003 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1004 count = min(m->m_len - off, len);
1005 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1006 if (rval)
1007 return (rval);
1008 len -= count;
1009 off = 0;
1010 m = m->m_next;
1011 }
1012 return (0);
1013}
1014
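/*
 * Usage sketch (illustrative): feeding every byte of a chain to a digest
 * function without first linearizing it.  "digest_update", "digest_ctx"
 * and "ctx" are hypothetical stand-ins for a real hash implementation:
 *
 *	static int
 *	digest_cb(void *arg, void *data, u_int len)
 *	{
 *		digest_update((digest_ctx *)arg, data, len);
 *		return (0);		(nonzero would abort the walk)
 *	}
 *	...
 *	error = m_apply(m, 0, m->m_pkthdr.len, digest_cb, &ctx);
 */
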
1015/*
1016 * Return a pointer to mbuf/offset of location in mbuf chain.
1017 */
1018struct mbuf *
1019m_getptr(struct mbuf *m, int loc, int *off)
1020{
1021
1022 while (loc >= 0) {
1023 /* Normal end of search. */
1024 if (m->m_len > loc) {
1025 *off = loc;
1026 return (m);
1027 } else {
1028 loc -= m->m_len;
1029 if (m->m_next == NULL) {
1030 if (loc == 0) {
1031 /* Point at the end of valid data. */
1032 *off = m->m_len;
1033 return (m);
1034 }
1035 return (NULL);
1036 }
1037 m = m->m_next;
1038 }
1039 }
1040 return (NULL);
1041}
1042
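/*
 * Usage sketch (illustrative; "skip" is a placeholder): locating the
 * mbuf that contains byte "skip" of the chain, as IPsec-style code does
 * before operating on an embedded header:
 *
 *	n = m_getptr(m, skip, &off);
 *	if (n == NULL)
 *		return (EINVAL);	(skip was beyond the chain)
 *	(byte "skip" now lives at mtod(n, caddr_t) + off)
 */
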
1043void
1044m_print(const struct mbuf *m, int maxlen)
1045{
1046 int len;
1047 int pdata;
1048 const struct mbuf *m2;
1049
1050 if (m->m_flags & M_PKTHDR)
1051 len = m->m_pkthdr.len;
1052 else
1053 len = -1;
1054 m2 = m;
1055 while (m2 != NULL && (len == -1 || len)) {
1056 pdata = m2->m_len;
1057 if (maxlen != -1 && pdata > maxlen)
1058 pdata = maxlen;
1059 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1060 m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1061 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1062 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1063 if (pdata)
1064 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1065 if (len != -1)
1066 len -= m2->m_len;
1067 m2 = m2->m_next;
1068 }
1069 if (len > 0)
1070 printf("%d bytes unaccounted for.\n", len);
1071 return;
1072}
1073
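/*
 * Usage sketch (illustrative): from a debugger hook or a temporary
 * diagnostic, dump an entire chain, or cap the hex dump per mbuf:
 *
 *	m_print(m, -1);			(no per-mbuf limit)
 *	m_print(m, 32);			(at most 32 data bytes per mbuf)
 */
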
1074u_int
1075m_fixhdr(struct mbuf *m0)
1076{
1077 u_int len;
1078
1079 len = m_length(m0, NULL);
1080 m0->m_pkthdr.len = len;
1081 return (len);
1082}
1083
1084u_int
1085m_length(struct mbuf *m0, struct mbuf **last)
1086{
1087 struct mbuf *m;
1088 u_int len;
1089
1090 len = 0;
1091 for (m = m0; m != NULL; m = m->m_next) {
1092 len += m->m_len;
1093 if (m->m_next == NULL)
1094 break;
1095 }
1096 if (last != NULL)
1097 *last = m;
1098 return (len);
1099}
1100
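/*
 * Usage sketch (illustrative; "pktlen" and "last" are placeholders):
 * after hand-editing a chain, recompute the true length and
 * resynchronize the packet header:
 *
 *	pktlen = m_length(m0, &last);	(last now points at the final mbuf)
 *	m0->m_pkthdr.len = pktlen;	(or simply: m_fixhdr(m0))
 */
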
1101/*
1102 * Defragment an mbuf chain, returning the shortest possible
1103 * chain of mbufs and clusters. If allocation fails and
1104 * this cannot be completed, NULL will be returned, but
1105 * the passed in chain will be unchanged. Upon success,
1106 * the original chain will be freed, and the new chain
1107 * will be returned.
1108 *
1109 * If an mbuf without a packet header is passed in, the
1110 * original mbuf chain will be returned unharmed.
1111 */
1112struct mbuf *
1113m_defrag(struct mbuf *m0, int how)
1114{
1115 struct mbuf *m_new = NULL, *m_final = NULL;
1116 int progress = 0, length;
1117
1118 MBUF_CHECKSLEEP(how);
1119 if (!(m0->m_flags & M_PKTHDR))
1120 return (m0);
1121
1122 m_fixhdr(m0); /* Needed sanity check */
1123
1124#ifdef MBUF_STRESS_TEST
1125 if (m_defragrandomfailures) {
1126 int temp = arc4random() & 0xff;
1127 if (temp == 0xba)
1128 goto nospace;
1129 }
1130#endif
1131
1132 if (m0->m_pkthdr.len > MHLEN)
1133 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1134 else
1135 m_final = m_gethdr(how, MT_DATA);
1136
1137 if (m_final == NULL)
1138 goto nospace;
1139
1140 if (m_dup_pkthdr(m_final, m0, how) == 0)
1141 goto nospace;
1142
1143 m_new = m_final;
1144
1145 while (progress < m0->m_pkthdr.len) {
1146 length = m0->m_pkthdr.len - progress;
1147 if (length > MCLBYTES)
1148 length = MCLBYTES;
1149
1150 if (m_new == NULL) {
1151 if (length > MLEN)
1152 m_new = m_getcl(how, MT_DATA, 0);
1153 else
1154 m_new = m_get(how, MT_DATA);
1155 if (m_new == NULL)
1156 goto nospace;
1157 }
1158
1159 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1160 progress += length;
1161 m_new->m_len = length;
1162 if (m_new != m_final)
1163 m_cat(m_final, m_new);
1164 m_new = NULL;
1165 }
1166#ifdef MBUF_STRESS_TEST
1167 if (m0->m_next == NULL)
1168 m_defraguseless++;
1169#endif
1170 m_freem(m0);
1171 m0 = m_final;
1172#ifdef MBUF_STRESS_TEST
1173 m_defragpackets++;
1174 m_defragbytes += m0->m_pkthdr.len;
1175#endif
1176 return (m0);
1177nospace:
1178#ifdef MBUF_STRESS_TEST
1179 m_defragfailure++;
1180#endif
1181 if (m_new)
1182 m_free(m_new);
1183 if (m_final)
1184 m_freem(m_final);
1185 return (NULL);
1186}
1187
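/*
 * Usage sketch (illustrative): a transmit path that must fit a chain
 * into a limited number of DMA segments.  On failure the original chain
 * is still intact; on success it has already been freed:
 *
 *	(if the chain maps to too many segments:)
 *	m = m_defrag(m_head, M_DONTWAIT);
 *	if (m == NULL)
 *		m_freem(m_head);	(give up on this packet)
 *	else
 *		m_head = m;		(the old chain was already freed)
 */
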
1188#ifdef MBUF_STRESS_TEST
1189
1190/*
1191 * Fragment an mbuf chain. There's no reason you'd ever want to do
1192 * this in normal usage, but it's great for stress testing various
1193 * mbuf consumers.
1194 *
1195 * If fragmentation is not possible, the original chain will be
1196 * returned.
1197 *
1198 * Possible length values:
1199 * 0 no fragmentation will occur
1200 * > 0 each fragment will be of the specified length
1201 * -1 all fragments will be of the same, randomly chosen length
1202 * -2 each fragment's length will be entirely random
1203 * (Random values range from 1 to 256)
1204 */
1205struct mbuf *
1206m_fragment(struct mbuf *m0, int how, int length)
1207{
1208 struct mbuf *m_new = NULL, *m_final = NULL;
1209 int progress = 0;
1210
1211 if (!(m0->m_flags & M_PKTHDR))
1212 return (m0);
1213
1214 if ((length == 0) || (length < -2))
1215 return (m0);
1216
1217 m_fixhdr(m0); /* Needed sanity check */
1218
1219 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1220
1221 if (m_final == NULL)
1222 goto nospace;
1223
1224 if (m_dup_pkthdr(m_final, m0, how) == 0)
1225 goto nospace;
1226
1227 m_new = m_final;
1228
1229 if (length == -1)
1230 length = 1 + (arc4random() & 255);
1231
1232 while (progress < m0->m_pkthdr.len) {
1233 int fraglen;
1234
1235 if (length > 0)
1236 fraglen = length;
1237 else
1238 fraglen = 1 + (arc4random() & 255);
1239 if (fraglen > m0->m_pkthdr.len - progress)
1240 fraglen = m0->m_pkthdr.len - progress;
1241
1242 if (fraglen > MCLBYTES)
1243 fraglen = MCLBYTES;
1244
1245 if (m_new == NULL) {
1246 m_new = m_getcl(how, MT_DATA, 0);
1247 if (m_new == NULL)
1248 goto nospace;
1249 }
1250
1251 m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1252 progress += fraglen;
1253 m_new->m_len = fraglen;
1254 if (m_new != m_final)
1255 m_cat(m_final, m_new);
1256 m_new = NULL;
1257 }
1258 m_freem(m0);
1259 m0 = m_final;
1260 return (m0);
1261nospace:
1262 if (m_new)
1263 m_free(m_new);
1264 if (m_final)
1265 m_freem(m_final);
1266 /* Return the original chain on failure */
1267 return (m0);
1268}
1269
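/*
 * Usage sketch (illustrative): a stress-test hook might shred every
 * outbound packet into fully random fragments before handing it on:
 *
 *	m = m_fragment(m, M_DONTWAIT, -2);
 *	(never NULL: the original chain is returned if fragmenting fails)
 */
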
1270#endif
1271
1272struct mbuf *
1273m_uiotombuf(struct uio *uio, int how, int len)
1274{
1275 struct mbuf *m_new = NULL, *m_final = NULL;
1276 int progress = 0, error = 0, length, total;
1277
1278 if (len > 0)
1279 total = min(uio->uio_resid, len);
1280 else
1281 total = uio->uio_resid;
1282 if (total > MHLEN)
1283 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1284 else
1285 m_final = m_gethdr(how, MT_DATA);
1286 if (m_final == NULL)
1287 goto nospace;
1288 m_new = m_final;
1289 while (progress < total) {
1290 length = total - progress;
1291 if (length > MCLBYTES)
1292 length = MCLBYTES;
1293 if (m_new == NULL) {
1294 if (length > MLEN)
1295 m_new = m_getcl(how, MT_DATA, 0);
1296 else
1297 m_new = m_get(how, MT_DATA);
1298 if (m_new == NULL)
1299 goto nospace;
1300 }
1301 error = uiomove(mtod(m_new, void *), length, uio);
1302 if (error)
1303 goto nospace;
1304 progress += length;
1305 m_new->m_len = length;
1306 if (m_new != m_final)
1307 m_cat(m_final, m_new);
1308 m_new = NULL;
1309 }
1310 m_fixhdr(m_final);
1311 return (m_final);
1312nospace:
1313 if (m_new)
1314 m_free(m_new);
1315 if (m_final)
1316 m_freem(m_final);
1317 return (NULL);
1318}
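
/*
 * Usage sketch (illustrative; "space" is a placeholder): a write path
 * copying at most "space" bytes of user data into a fresh chain:
 *
 *	m = m_uiotombuf(uio, M_TRYWAIT, space);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	(m->m_pkthdr.len reflects the bytes actually copied)
 */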