uipc_mbuf.c revision 78508
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 78508 2001-06-20 19:48:35Z bmilekic $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters = NMBCLUSTERS;
int	nmbufs = NMBCLUSTERS * 4;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;

/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;
struct mtx mbuf_mtx;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Maximum number of ext_buf counters available");

TUNABLE_INT("kern.ipc.nmbclusters", &nmbclusters);
TUNABLE_INT("kern.ipc.nmbufs", &nmbufs);
TUNABLE_INT("kern.ipc.nmbcnt", &nmbcnt);

static void	m_reclaim(void);

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS

static void
tunable_mbinit(void *dummy)
{

	/*
	 * Sanity checks and pre-initialization for non-constants.
	 * This has to be done before VM initialization.
	 */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
	if (nmbcnt == 0)
		nmbcnt = EXT_COUNTERS;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *      machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(void *dummy)
{
	vm_offset_t maxaddr;
	vm_size_t mb_map_size;

	/*
	 * Setup the mb_map, allocate requested VM space.
	 */
	mb_map_size = (vm_size_t)(nmbufs * MSIZE + nmbclusters * MCLBYTES +
	    nmbcnt * sizeof(union mext_refcnt));
	mb_map_size = rounddown(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX XXX XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers, and setup locks for lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mbuf_mtx, "mbuf free list lock", MTX_DEF);
	cv_init(&mmbfree.m_starved, "mbuf free list starved cv");
	cv_init(&mclfree.m_starved, "mbuf cluster free list starved cv");

	/*
	 * Initialize mbuf subsystem (sysctl exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_lock(&mbuf_mtx);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mbuf_mtx);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}

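/*
 * Editor's note (illustrative, not part of the original source): the
 * TUNABLE_INT() hooks above fetch values from the kernel environment, so
 * the limits can be set from the boot loader before tunable_mbinit()
 * runs, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="16384"
 *	kern.ipc.nmbufs="65536"
 *
 * tunable_mbinit() then enforces nmbufs >= 2 * nmbclusters, so an
 * undersized nmbufs setting is raised rather than honored as given.
 */
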
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(u_int nmb, int how)
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	if (1 /* XXX: how == M_TRYWAIT */)
		mtx_unlock(&mbuf_mtx);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		if (1 /* XXX: how == M_TRYWAIT */)
			mtx_lock(&mbuf_mtx);
		return (0);
	}
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	if (1 /* XXX: how == M_TRYWAIT */)
		mtx_lock(&mbuf_mtx);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	nbytes = round_page(nmb * MSIZE);
	nmb = nbytes / MSIZE;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs))
		return (0);

	if (1 /* XXX: how == M_TRYWAIT */)
		mtx_unlock(&mbuf_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT);
	if (1 /* XXX: how == M_TRYWAIT */) {
		mtx_lock(&mbuf_mtx);
		if (p == NULL)
			mbstat.m_wait++;
	}

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}

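/*
 * Usage sketch (illustrative only, not compiled): callers normally reach
 * m_mballoc() through the MGET()/m_get() layer rather than directly.
 * The `how' argument decides what happens once the free list and mb_map
 * are exhausted:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);	// may fail; returns NULL
 *	if (m == NULL)
 *		return (ENOBUFS);	// hypothetical caller error path
 *
 *	m = m_get(M_TRYWAIT, MT_DATA);	// may sleep in m_mballoc_wait()
 */
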
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_TRYWAIT, then it is necessary to
 * rely solely on reclaimed mbufs.
 *
 * Here we ask the protocols to free up some resources and, if we still
 * cannot get anything, we wait for an mbuf to be freed for at most a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines. Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_unlock(&mbuf_mtx);
	m_reclaim();

	mtx_lock(&mbuf_mtx);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		int retval;

		m_mballoc_wid++;
		retval = cv_timedwait(&mmbfree.m_starved, &mbuf_mtx,
		    mbuf_wait);
		m_mballoc_wid--;

		/*
		 * If we got signaled (i.e. didn't time out), allocate.
		 */
		if (retval == 0)
			_MGET(p, M_DONTWAIT);
	}

	if (p != NULL) {
		mbstat.m_wait++;
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid, &mmbfree.m_starved);
	}

	return (p);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg_sz;

	npg_sz = round_page(ncl * MCLBYTES);
	ncl = npg_sz / MCLBYTES;

	/*
	 * If the map is now full (nothing will ever be freed to it), or
	 * if we've hit the mcluster number limit, stop allocating from
	 * mb_map.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters))
		return (0);

	if (1 /* XXX: how == M_TRYWAIT */)
		mtx_unlock(&mbuf_mtx);
	p = (caddr_t)kmem_malloc(mb_map, npg_sz,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	if (1 /* XXX: how == M_TRYWAIT */)
		mtx_lock(&mbuf_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

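/*
 * Usage sketch (illustrative only, not compiled): cluster storage is
 * normally attached to an existing mbuf with MCLGET(), which bottoms out
 * in m_clalloc(); since MCLGET() cannot return a value, success must be
 * checked through the M_EXT flag:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		MCLGET(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);	// no cluster; give the mbuf back
 *			m = NULL;
 *		}
 *	}
 */
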
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * block on a cv for a designated amount of time (mbuf_wait) or until we're
 * signaled due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;
	int retval;

	m_clalloc_wid++;
	retval = cv_timedwait(&mclfree.m_starved, &mbuf_mtx, mbuf_wait);
	m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, try again.
	 */
	if (retval == 0)
		_MCLALLOC(p, M_DONTWAIT);

	if (p != NULL) {
		mbstat.m_wait++;
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid, &mclfree.m_starved);
	}

	return (p);
}

/*
 * m_reclaim: drain protocols in hopes of freeing up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks, which raises the possibility of a
 * lock order violation if we're holding any mutex that is acquired in
 * reverse order relative to one of the locks in the drain routines.
 */
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(curproc) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * Some of these are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m != NULL)
		bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

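/*
 * Usage sketch (illustrative only, not compiled): the functions above
 * trade a call for the inlined macro bodies; code off the critical path
 * would typically do:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_TRYWAIT, MT_HEADER);
 *	...
 *	m_freem(m);		// frees the whole chain, m included
 *
 * while performance-critical code uses MGETHDR()/MFREE() directly.
 */
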
/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain. If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

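/*
 * Usage sketch (illustrative only, not compiled): m_prepend() is the slow
 * path of the M_PREPEND() macro, which first tries to use leading space
 * already present in the first mbuf. A protocol prepending a hypothetical
 * header would write:
 *
 *	M_PREPEND(m, sizeof(struct foohdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	// chain was freed on failure
 *
 * Note that failure frees the original chain, so callers must not reuse
 * the old pointer.
 */
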
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL) {
		mtx_lock(&mbuf_mtx);
		mbstat.m_mcfail++;
		mtx_unlock(&mbuf_mtx);
	}
	return (top);
nospace:
	m_freem(top);
	mtx_lock(&mbuf_mtx);
	mbstat.m_mcfail++;
	mtx_unlock(&mbuf_mtx);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	mtx_lock(&mbuf_mtx);
	mbstat.m_mcfail++;
	mtx_unlock(&mbuf_mtx);
	return (NULL);
}

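/*
 * Usage sketch (illustrative only, not compiled): because m_copym() and
 * m_copypacket() share clusters by bumping reference counts, the result
 * must be treated as read-only. A retransmission-style caller would do:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	// n aliases m's cluster data; use m_dup() for a writable copy
 */
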
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	mtx_lock(&mbuf_mtx);
	mbstat.m_mcfail++;
	mtx_unlock(&mbuf_mtx);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

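/*
 * Usage sketch (illustrative only, not compiled): m_adj() trims from the
 * front with a positive count and from the back with a negative one, e.g.
 * stripping a hypothetical 14-byte link header and a 4-byte trailer:
 *
 *	m_adj(m, 14);		// drop 14 bytes from the head
 *	m_adj(m, -4);		// drop 4 bytes from the tail
 *
 * If the first mbuf carries a packet header, its m_pkthdr.len is
 * adjusted to match.
 */
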
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mtx_lock(&mbuf_mtx);
	mbstat.m_mpfail++;
	mtx_unlock(&mbuf_mtx);
	return (NULL);
}

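/*
 * Usage sketch (illustrative only, not compiled): the classic m_pullup()
 * idiom makes a protocol header contiguous before casting with mtod(),
 * e.g. for an IP header:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		// chain already freed by m_pullup()
 *	ip = mtod(m, struct ip *);
 */
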
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(buf, mtod(m, caddr_t), (unsigned)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

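/*
 * Usage sketch (illustrative only, not compiled): a driver receive path
 * would hand m_devget() a pointer into board memory and get back a fresh
 * packet-header chain, passing a NULL copy function to fall back to
 * bcopy() (rxbuf, pktlen and ifp are hypothetical driver state):
 *
 *	m = m_devget(rxbuf, pktlen, 0, ifp, NULL);
 *	if (m != NULL)
 *		// hand the chain to the input path, e.g. ether_input()
 */
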
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

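/*
 * Usage sketch (illustrative only, not compiled): m_copyback() overwrites
 * bytes in place and grows the chain with zero-filled mbufs when `off'
 * lies past the end, e.g. patching a 2-byte checksum field at a known
 * offset (ckoff and sum are hypothetical):
 *
 *	m_copyback(m, ckoff, sizeof(u_short), (caddr_t)&sum);
 *
 * Allocation failures are silent: the chain is simply left shorter than
 * requested, so callers needing certainty must check m_pkthdr.len.
 */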