uipc_mbuf.c revision 15736
1/* 2 * Copyright (c) 1982, 1986, 1988, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $Id: uipc_mbuf.c,v 1.21 1996/05/10 19:28:48 wollman Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;		/* base of the mbuf region (set elsewhere) */
char *mclrefcnt;		/* per-cluster reference counts, indexed by mtocl() */
struct mbstat mbstat;		/* allocation/failure statistics */
struct mbuf *mmbfree;		/* head of the free mbuf list */
union mcluster *mclfree;	/* head of the free cluster list */
int max_linkhdr;		/* used by m_devget() to reserve link-header room */
int max_protohdr;		/* used by m_pullup() as extra pull-up goal */
int max_hdr;
int max_datalen;

static void m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT 1

/*
 * System startup: seed the mbuf and cluster free lists with their
 * initial allocations.  Panics if either allocation fails, since the
 * kernel cannot run without mbufs.
 */
/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Returns 1 on success, 0 if the map is full or no pages were
 * available.  Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/* Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map) (XXX which
	 * is dumb). (however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	/* Carve the pages into mbufs and push each onto the free list. */
	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Returns 1 on success, 0 on failure.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mcl_map_full)
		return (0);

	npg = ncl;
	p = (caddr_t)kmem_malloc(mcl_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	/* Carve the pages into clusters and push each onto the free list. */
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * Temporarily redefine m_retry to return NULL while MGET is
	 * expanded below, so a second failure cannot recurse back into
	 * this function.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* As in m_retry(): keep MGETHDR from recursing back here. */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * Ask every protocol that provides a drain routine to release
 * whatever space it can.  Runs at splimp so the free lists stay
 * consistent while protocols free mbufs.
 */
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

/*
 * Like m_get(), but the data area is zeroed.
 */
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf; returns its successor in the chain (if any).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Free an entire mbuf chain.  A NULL argument is harmless.
 */
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* Allocation failed: the whole original chain is freed. */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Move the packet header into the new lead mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
static int MCFail;		/* count of failed copy attempts */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip over the first off bytes of the chain. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Ran off the end: only legal for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster instead of copying its data. */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* Share the cluster; bump its reference count. */
		n->m_data = m->m_data;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	/* m_freem() tolerates a NULL top from the first MGET failing. */
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	/* Skip to the mbuf containing the starting offset. */
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from the chain: from the head if req_len is
 * positive, from the tail if it is negative.  Updates m_pkthdr.len
 * when the first mbuf carries a packet header.
 */
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any mbufs past the new end. */
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
static int MPFail;		/* count of failed m_pullup() calls */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy as much as fits: at least len bytes if possible,
		 * up to max_protohdr to avoid another pullup later.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* Tail gets a fresh packet header; lengths are split. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster between the two pieces. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * NOTE(review): nonzero off skips past a pair of u_shorts
		 * before the data -- presumably a trailer-protocol layout;
		 * confirm against the callers.
		 */
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs are plain (no packet header). */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Large remainder: try to attach a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		/* Wrap back to the start of the device buffer. */
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	/* Walk out to offset off, growing the chain with zeroed mbufs. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Copy the data, appending new mbufs as the chain runs out. */
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}