uipc_mbuf.c revision 142350

/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 142350 2005-02-24 00:40:33Z sam $");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Malloc-type for external ext_buf ref counts.
 */
static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, short type)
{
	struct mbuf *mb, *top, *cur, *mtail;
	int num, rem;
	int i;

	KASSERT(len >= 0, ("m_getm(): len is < 0"));

	/* If m != NULL, we will append to the end of that chain. */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		mtail = NULL;

	/*
	 * Calculate how many mbufs+clusters ("packets") we need and how much
	 * leftover there is after that and allocate the first mbuf+cluster
	 * if required.
	 */
	num = len / MCLBYTES;
	rem = len % MCLBYTES;
	top = cur = NULL;
	if (num > 0) {
		if ((top = cur = m_getcl(how, type, 0)) == NULL)
			goto failed;
		top->m_len = 0;
	}
	num--;

	for (i = 0; i < num; i++) {
		mb = m_getcl(how, type, 0);
		if (mb == NULL)
			goto failed;
		mb->m_len = 0;
		cur = (cur->m_next = mb);
	}
	if (rem > 0) {
		mb = (rem > MINCLSIZE) ?
		    m_getcl(how, type, 0) : m_get(how, type);
		if (mb == NULL)
			goto failed;
		mb->m_len = 0;
		if (cur == NULL)
			top = mb;
		else
			cur->m_next = mb;
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return top;
failed:
	if (top != NULL)
		m_freem(top);
	return NULL;
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}
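
#if 0
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use m_getm() to grow an existing chain by "len" bytes worth of
 * mbufs/clusters.  The helper name is hypothetical; error handling is
 * deliberately minimal.
 */
static int
example_grow_chain(struct mbuf *m, int len)
{

	/* Append len bytes of storage; m_getm() returns the new part. */
	if (m_getm(m, len, M_DONTWAIT, MT_DATA) == NULL)
		return (ENOBUFS);	/* the original chain "m" is untouched */
	return (0);
}
#endif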

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *args, int flags, int type)
{
	u_int *ref_cnt = NULL;

	/* XXX Shouldn't be adding EXT_CLUSTER with this API */
	if (type == EXT_CLUSTER)
		ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
		    mb->m_ext.ext_buf);
	else if (type == EXT_EXTREF)
		ref_cnt = mb->m_ext.ref_cnt;
	mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
	    malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
	}
}

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 0.
 */
void
mb_free_ext(struct mbuf *m)
{
	u_int cnt;
	int dofree;

	/* Account for lazy ref count assign. */
	if (m->m_ext.ref_cnt == NULL)
		dofree = 1;
	else
		dofree = 0;

	/*
	 * This is tricky.  We need to make sure to decrement the
	 * refcount in a safe way but to also clean up if we're the
	 * last reference.  This method seems to do it without race.
	 */
	while (dofree == 0) {
		cnt = *(m->m_ext.ref_cnt);
		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
			if (cnt == 1)
				dofree = 1;
			break;
		}
	}

	if (dofree) {
		/*
		 * Do the free, should be safe.
		 */
		if (m->m_ext.ext_type == EXT_PACKET) {
			uma_zfree(zone_pack, m);
			return;
		} else if (m->m_ext.ext_type == EXT_CLUSTER) {
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			m->m_ext.ext_buf = NULL;
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_args);
			if (m->m_ext.ext_type != EXT_EXTREF) {
				if (m->m_ext.ref_cnt != NULL)
					free(m->m_ext.ref_cnt, M_MBUF);
				m->m_ext.ref_cnt = NULL;
			}
			m->m_ext.ext_buf = NULL;
		}
	}
	uma_zfree(zone_mbuf, m);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR.  Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}
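
#if 0
/*
 * Illustrative sketch (not part of the original source): attaching
 * caller-owned storage to an mbuf with m_extadd().  The free routine,
 * helper name, and the choice of EXT_NET_DRV as the ext_type are
 * assumptions for illustration; m_extadd() stores the routine in m_ext
 * and mb_free_ext() invokes it when the last reference goes away.
 */
static void
example_ext_free(void *buf, void *args)
{

	free(buf, M_DEVBUF);		/* assumes buf came from malloc(9) */
}

static struct mbuf *
example_attach(caddr_t buf, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_extadd(m, buf, size, example_ext_free, NULL, 0, EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {	/* ref count setup failed */
		m_free(m);
		return (NULL);
	}
	return (m);
}
#endif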

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
			n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
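
#if 0
/*
 * Illustrative sketch (not part of the original source): taking a
 * reference copy of a whole packet with m_copym().  Cluster data is
 * shared rather than copied, so the result is read-only; callers that
 * intend to write must check M_WRITABLE() or use m_dup() instead.
 */
static struct mbuf *
example_refcopy(struct mbuf *m)
{

	/* Copy the whole packet, header included, without copying data. */
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif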

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
			n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
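
#if 0
/*
 * Illustrative sketch (not part of the original source): pulling a
 * fixed-size header out of a possibly fragmented chain with
 * m_copydata(), which never modifies the chain.  The helper name and
 * the M_PKTHDR assumption on "m" are ours.
 */
static int
example_peek_header(const struct mbuf *m, int off, void *hdr, int hlen)
{

	if (m->m_pkthdr.len < off + hlen)
		return (EINVAL);	/* not enough data in the chain */
	m_copydata(m, off, hlen, (caddr_t)hdr);
	return (0);
}
#endif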

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
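
#if 0
/*
 * Illustrative sketch (not part of the original source): m_dup() vs.
 * m_copym().  When the copy must be written to (say, before in-place
 * transformation of the payload), a deep copy is required because
 * m_copym()/m_copypacket() share clusters with the original.
 */
static struct mbuf *
example_writable_copy(struct mbuf *m, int how)
{
	struct mbuf *n;

	n = m_dup(m, how);	/* deep copy, cluster contents included */
	if (n == NULL)
		return (NULL);
	/* "n" may now be modified without disturbing "m". */
	return (n);
}
#endif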

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
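
#if 0
/*
 * Illustrative sketch (not part of the original source): the canonical
 * m_pullup() idiom used by protocol input routines before casting
 * m_data to a header structure.  "struct example_hdr" and the helper
 * are hypothetical.
 */
struct example_hdr {
	u_int32_t	eh_field;
};

static struct mbuf *
example_input(struct mbuf *m)
{
	struct example_hdr *eh;

	if (m->m_len < sizeof(*eh) &&
	    (m = m_pullup(m, sizeof(*eh))) == NULL)
		return (NULL);		/* chain was freed by m_pullup() */
	eh = mtod(m, struct example_hdr *);
	(void)eh->eh_field;		/* header bytes are now contiguous */
	return (m);
}
#endif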

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_DONTWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
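
#if 0
/*
 * Illustrative sketch (not part of the original source): splitting a
 * packet at a byte boundary with m_split(), e.g. to peel fixed-size
 * pieces off the front of "m".  The helper name is hypothetical; on
 * failure m_split() leaves the original chain intact.
 */
static struct mbuf *
example_segment(struct mbuf *m, int mtu)
{
	struct mbuf *tail;

	if (m->m_pkthdr.len <= mtu)
		return (NULL);		/* nothing to split off */
	tail = m_split(m, mtu, M_DONTWAIT);
	/* "m" now holds the first mtu bytes; "tail" holds the rest. */
	return (tail);
}
#endif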

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
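
#if 0
/*
 * Illustrative sketch (not part of the original source): using
 * m_apply() to walk the data region by region, here computing a toy
 * additive checksum without linearizing the chain.  Both helpers are
 * hypothetical; returning non-zero from the callback aborts the walk.
 */
static int
example_sum_region(void *arg, void *data, u_int len)
{
	u_int *sum = arg;
	u_char *p = data;

	while (len-- > 0)
		*sum += *p++;
	return (0);
}

static u_int
example_checksum(struct mbuf *m, int off, int len)
{
	u_int sum = 0;

	(void)m_apply(m, off, len, example_sum_region, &sum);
	return (sum);
}
#endif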

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", m2->m_len, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}
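
#if 0
/*
 * Illustrative sketch (not part of the original source): after
 * hand-editing m_len across a chain, m_fixhdr() resynchronizes
 * m_pkthdr.len, and m_length() can also report the last mbuf for
 * cheap appends.  The helper name is hypothetical.
 */
static void
example_resync(struct mbuf *m0)
{
	struct mbuf *last;

	(void)m_fixhdr(m0);		/* m_pkthdr.len = sum of m_len */
	(void)m_length(m0, &last);	/* "last" now points at the tail */
}
#endif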

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
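
#if 0
/*
 * Illustrative sketch (not part of the original source): the usual
 * m_defrag() pattern in a transmit path when a chain has more
 * fragments than a device's DMA engine can take.  "EXAMPLE_MAX_SEGS"
 * and the helper are hypothetical.
 */
static struct mbuf *
example_tx_prepare(struct mbuf *m, int nsegs)
{
	struct mbuf *m2;

	if (nsegs <= EXAMPLE_MAX_SEGS)
		return (m);
	m2 = m_defrag(m, M_DONTWAIT);
	if (m2 == NULL) {
		m_freem(m);		/* m_defrag() left "m" unchanged */
		return (NULL);
	}
	return (m2);			/* original chain was freed */
}
#endif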

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 *	(Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif

struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, error = 0, length, total;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;
	if (total > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);
	if (m_final == NULL)
		goto nospace;
	m_new = m_final;
	while (progress < total) {
		length = total - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;
		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}
		error = uiomove(mtod(m_new, void *), length, uio);
		if (error)
			goto nospace;
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_fixhdr(m_final);
	return (m_final);
nospace:
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
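
#if 0
/*
 * Illustrative sketch (not part of the original source): turning user
 * data described by a struct uio into an mbuf chain with
 * m_uiotombuf(), the pattern a socket send path might use.  Passing
 * len <= 0 takes everything in uio_resid; the helper is hypothetical.
 */
static struct mbuf *
example_from_uio(struct uio *uio, int maxlen)
{

	return (m_uiotombuf(uio, M_DONTWAIT, maxlen));
}
#endif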