uipc_mbuf.c revision 72200
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 72200 2001-02-09 06:11:45Z bmilekic $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;

/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);

static void	m_reclaim __P((void));

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS

/*
 * Full mbuf subsystem initialization is done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *      machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(dummy)
	void *dummy;
{
	vm_offset_t maxaddr, mb_map_size;

	/*
	 * Set up mb_map, allocating the requested amount of VM space.
	 */
	mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
	    sizeof(union mext_refcnt);
	mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers and set up the locks for the lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
	mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
	mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

	/*
	 * Initialize the mbuf subsystem (sysctl-exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_lock(&mcntfree.m_mtx);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mcntfree.m_mtx);

	mtx_lock(&mmbfree.m_mtx);
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mmbfree.m_mtx);

	mtx_lock(&mclfree.m_mtx);
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mclfree.m_mtx);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
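/*
 * A worked example of the sizing arithmetic above (an illustrative
 * addition, assuming the historical i386 defaults: MAXUSERS 32,
 * MSIZE 256, MCLBYTES 2048, PAGE_SIZE 4096): NMBCLUSTERS comes out to
 * 512 + 32 * 16 = 1024 and nmbufs defaults to 4 * NMBCLUSTERS = 4096,
 * so mb_map is sized at roughly 4096 * 256 + 1024 * 2048 = 3MB, plus
 * nmbcnt * sizeof(union mext_refcnt) bytes for the external buffer
 * reference counters, rounded up to a page boundary.
 */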
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(nmb, how)
	u_int nmb;
	int how;
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	mtx_unlock(&mcntfree.m_mtx);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		mtx_lock(&mcntfree.m_mtx);
		return (0);
	}
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	mtx_lock(&mcntfree.m_mtx);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place them on the mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
		/*
		 * Needs to be atomic as we may be incrementing it
		 * while holding another mutex, like mclfree. In other
		 * words, m_drops is not reserved solely for mbufs,
		 * but is also available for clusters.
		 */
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	nbytes = round_page(nmb * MSIZE);

	mtx_unlock(&mmbfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_TRYWAIT) {
		atomic_add_long(&mbstat.m_wait, 1);
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}
	mtx_lock(&mmbfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
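/*
 * A minimal sketch of the consumer side of the free list that
 * m_mballoc() above refills (an illustrative addition; this is,
 * roughly and with type accounting and refill logic omitted, what the
 * _MGET() macro in <sys/mbuf.h> does conceptually): pop the head while
 * holding the same mmbfree lock.
 *
 *	mtx_lock(&mmbfree.m_mtx);
 *	if ((m = mmbfree.m_head) != NULL)
 *		mmbfree.m_head = m->m_next;
 *	mtx_unlock(&mmbfree.m_mtx);
 */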
/*
 * Once mb_map has been exhausted, if the call to the allocation macros
 * (or, in some cases, functions) was made with M_TRYWAIT, then it is
 * necessary to rely solely on reclaimed mbufs.
 *
 * Here we ask the protocols to free up some resources and, if we
 * still cannot get anything, then we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines. Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_unlock(&mmbfree.m_mtx);
	m_reclaim();

	mtx_lock(&mmbfree.m_mtx);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		m_mballoc_wid++;
		msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
		    mbuf_wait);
		m_mballoc_wid--;

		/*
		 * Try again (one last time).
		 *
		 * We retry the fetch _even_ if the sleep timed out. This
		 * is left this way, purposely, in the [unlikely] case
		 * that an mbuf was freed but the sleep was not awakened
		 * in time.
		 *
		 * If the sleep didn't time out (i.e. we got woken up) then
		 * we have the lock so we just grab an mbuf, hopefully.
		 */
		_MGET(p, M_DONTWAIT);
	}

	/* If we waited and got something... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * Allocate some number of mbuf clusters
 * and place them on the cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once the map is full, nothing will ever be freed back to it.
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	npg = ncl;
	mtx_unlock(&mclfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	mtx_lock(&mclfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
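/*
 * A worked example of the page arithmetic in m_clalloc() above (an
 * illustrative addition, assuming i386 constants: PAGE_SIZE 4096,
 * MCLBYTES 2048).  The ncl argument is effectively treated as a page
 * count: "npg = ncl" and ctob(npg) convert it to bytes, and the later
 * "ncl = ncl * PAGE_SIZE / MCLBYTES" yields two clusters per allocated
 * page, so m_clalloc(2, how) carves 2 pages into 4 clusters.
 */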
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;

	m_clalloc_wid++;
	msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait);
	m_clalloc_wid--;

	/*
	 * Now that we (think) that we've got something, try again.
	 */
	_MCLALLOC(p, M_DONTWAIT);

	/* If we waited and got something ... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * m_reclaim: drain the protocols in the hope of freeing up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks which raises the possibility of lock
 * order violation if we're holding any mutex, if that mutex is acquired in
 * reverse order relative to one of the locks in the drain routines.
 */
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(CURPROC) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check non-first mbufs, since some existing
		 * code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuf utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate a new mbuf to prepend to the chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
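#ifdef MBUF_USAGE_EXAMPLES
/*
 * A minimal usage sketch of the allocation routines above.  This block,
 * its guard macro, and the function name are illustrative additions,
 * not part of the original revision: grab a packet header mbuf with
 * m_gethdr() and leave the chain ready to be extended; on any later
 * failure the whole chain can be released with a single m_freem().
 */
static struct mbuf *
example_alloc_pkthdr(void)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);	/* may return NULL */
	if (m != NULL) {
		m->m_pkthdr.len = 0;
		m->m_len = 0;
	}
	return (m);
}
#endif /* MBUF_USAGE_EXAMPLES */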
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		atomic_add_long(&MCFail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return 0;
}
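#ifdef MBUF_USAGE_EXAMPLES
/*
 * A usage sketch for m_copym() above (an illustrative addition; the
 * guard macro and function name are hypothetical): how a retransmission
 * path might take a reference-counted copy of a queued packet.  Because
 * clusters are shared rather than copied, the result is read-only.
 */
static struct mbuf *
example_readonly_copy(struct mbuf *m)
{
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif /* MBUF_USAGE_EXAMPLES */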
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
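#ifdef MBUF_USAGE_EXAMPLES
/*
 * A companion sketch to the example above (an illustrative addition):
 * where m_copym() and m_copypacket() share cluster data read-only,
 * m_dup() deep-copies the whole chain, clusters included, so the
 * result may safely be written to.
 */
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	return (m_dup(m, M_DONTWAIT));		/* NULL if allocation fails */
}
#endif /* MBUF_USAGE_EXAMPLES */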
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If the first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long(&MPFail, 1);
	return (0);
}
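#ifdef MBUF_USAGE_EXAMPLES
/*
 * A protocol-input style sketch combining m_adj() and m_pullup() above
 * (an illustrative addition; the guard macro and function name are
 * hypothetical): strip a link-level header from the front of the chain,
 * then make sure the network header is contiguous before any code casts
 * m_data to a header structure.  Note that m_pullup() frees the chain
 * itself on failure.
 */
static struct mbuf *
example_input_fixup(struct mbuf *m, int llhdrlen, int nhdrlen)
{
	m_adj(m, llhdrlen);			/* positive len: trim head */
	if (m->m_len < nhdrlen &&
	    (m = m_pullup(m, nhdrlen)) == NULL)
		return (NULL);			/* chain already freed */
	return (m);
}
#endif /* MBUF_USAGE_EXAMPLES */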
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0;	/* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
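#ifdef MBUF_USAGE_EXAMPLES
/*
 * A usage sketch for m_split() above (an illustrative addition): cut a
 * packet after the first len0 bytes, keeping the head in the original
 * chain and getting the tail back, the way a transport protocol might
 * when transmitting only part of a queued packet.
 */
static struct mbuf *
example_take_tail(struct mbuf *m0, int len0)
{
	return (m_split(m0, len0, M_DONTWAIT));	/* NULL on failure */
}
#endif /* MBUF_USAGE_EXAMPLES */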
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}