uipc_mbuf.c revision 72750
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 72750 2001-02-20 08:23:41Z luigi $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;
/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);

static void	m_reclaim(void);

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS

/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *      machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(void *dummy)
{
	vm_offset_t maxaddr, mb_map_size;

	/*
	 * Setup the mb_map, allocate requested VM space.
	 */
	mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
	    sizeof(union mext_refcnt);
	mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX XXX XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers, and setup locks for lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
	mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
	mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

	/*
	 * Initialize mbuf subsystem (sysctl exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_lock(&mcntfree.m_mtx);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mcntfree.m_mtx);

	mtx_lock(&mmbfree.m_mtx);
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mmbfree.m_mtx);

	mtx_lock(&mclfree.m_mtx);
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mclfree.m_mtx);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
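/*
 * For a sense of scale of the map sizing above: taking illustrative
 * i386-style values of MSIZE = 256, MCLBYTES = 2048 and MAXUSERS = 32
 * (so NMBCLUSTERS defaults to 512 + 32 * 16 = 1024 and nmbufs to
 * 4 * 1024 = 4096), and assuming nmbcnt = 1024 with 8-byte mext_refcnt
 * unions purely for illustration:
 *
 *	4096 * 256 + 1024 * 2048 + 1024 * 8 = 3153920 bytes,
 *
 * which roundup2() then pads to a PAGE_SIZE multiple.  The real numbers
 * vary with the platform and the kern.ipc.* tunables.
 */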
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(u_int nmb, int how)
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	mtx_unlock(&mcntfree.m_mtx);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		mtx_lock(&mcntfree.m_mtx);
		return (0);
	}
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	mtx_lock(&mcntfree.m_mtx);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
		/*
		 * Needs to be atomic as we may be incrementing it
		 * while holding another mutex, like mclfree. In other
		 * words, m_drops is not reserved solely for mbufs,
		 * but is also available for clusters.
		 */
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	nbytes = round_page(nmb * MSIZE);

	mtx_unlock(&mmbfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == NULL && how == M_TRYWAIT) {
		atomic_add_long(&mbstat.m_wait, 1);
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}
	mtx_lock(&mmbfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
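/*
 * An illustrative (not compiled) sketch of the calling convention the
 * two allocators above share: the caller takes the list mutex, asks
 * for a refill if the list is empty, pops entries while still holding
 * the lock, and only then unlocks:
 *
 *	struct mbuf *m;
 *
 *	mtx_lock(&mmbfree.m_mtx);
 *	if (mmbfree.m_head == NULL && m_mballoc(1, M_DONTWAIT) == 0) {
 *		mtx_unlock(&mmbfree.m_mtx);
 *		return (NULL);		// no pages, not willing to wait
 *	}
 *	m = mmbfree.m_head;		// pop under the same lock hold
 *	mmbfree.m_head = m->m_next;
 *	mtx_unlock(&mmbfree.m_mtx);
 *
 * This is roughly what the MGET()/MGETHDR() macros expand to, give or
 * take statistics and type accounting.
 */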
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_TRYWAIT, then it is necessary to
 * rely solely on reclaimed mbufs.
 *
 * Here we request that the protocols free up some resources and, if we
 * still cannot get anything, then we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines. Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_unlock(&mmbfree.m_mtx);
	m_reclaim();

	mtx_lock(&mmbfree.m_mtx);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		m_mballoc_wid++;
		msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
		    mbuf_wait);
		m_mballoc_wid--;

		/*
		 * Try again (one last time).
		 *
		 * We retry to fetch _even_ if the sleep timed out. This
		 * is left this way, purposely, in the [unlikely] case
		 * that an mbuf was freed but the sleep was not awoken
		 * in time.
		 *
		 * If the sleep didn't time out (i.e. we got woken up) then
		 * we have the lock so we just grab an mbuf, hopefully.
		 */
		_MGET(p, M_DONTWAIT);
	}

	/* If we waited and got something... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * Stop allocating from mb_map if the map is full (nothing will
	 * ever be freed back to it) or if we've hit the mcluster number
	 * limit.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	npg = ncl;
	mtx_unlock(&mclfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	mtx_lock(&mclfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
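/*
 * Note the page-based rounding above: the ncl argument is treated as a
 * page count when sizing the kmem_malloc() request, then converted back
 * into whole clusters.  With illustrative values of PAGE_SIZE = 4096
 * and MCLBYTES = 2048, a request for NCL_INIT (2) clusters maps 2 pages,
 * i.e. ctob(2) = 8192 bytes, and therefore links 2 * 4096 / 2048 = 4
 * clusters onto the free list.
 */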
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;

	m_clalloc_wid++;
	msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait);
	m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, try again.
	 */
	_MCLALLOC(p, M_DONTWAIT);

	/* If we waited and got something ... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * m_reclaim: drain protocols in hopes to free up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks, which raises the possibility of a lock
 * order violation if we're holding a mutex that is acquired in reverse
 * order relative to one of the locks in the drain routines.
 */
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(CURPROC) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * Some of these are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == NULL)
		return (NULL);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}
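/*
 * For example, a caller that needs a scratch mbuf for at most MLEN
 * bytes of data might do (illustrative sketch only; src and len are
 * hypothetical):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = len;				// len <= MLEN assumed
 *	bcopy(src, mtod(m, caddr_t), len);
 *	...
 *	m_free(m);
 */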
/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain. If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check non-first mbufs, since some existing
		 * code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
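/*
 * Callers normally reach m_prepend() through the M_PREPEND() macro,
 * which grows into the mbuf's leading space when it can and only falls
 * back to allocating a new mbuf.  An illustrative sketch of prepending
 * a header (struct myhdr is hypothetical):
 *
 *	M_PREPEND(m, sizeof(struct myhdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	// chain was freed for us
 *	hdr = mtod(m, struct myhdr *);	// fill in the new header
 *
 * Note that M_PREPEND(), like m_prepend(), frees the chain on failure.
 */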
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long(&MCFail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}
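/*
 * Because m_copym()/m_copypacket() share clusters by bumping reference
 * counts, a typical retransmission-style caller treats the copy as
 * read-only (illustrative sketch; output_routine is hypothetical):
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	// n may alias m's cluster data; use m_dup() below when a
 *	// writable copy is needed.
 *	(void) output_routine(n);
 */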
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
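/*
 * Unlike m_copym(), m_dup() above deep-copies cluster contents, so the
 * result may be modified freely.  A sketch of the common
 * copy-then-modify pattern (illustrative only):
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, M_DONTWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 *	mtod(w, u_char *)[0] = 0x45;	// safe: w shares nothing with m
 */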
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long(&MPFail, 1);
	return (NULL);
}
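/*
 * The canonical m_pullup() idiom, e.g. from a protocol input routine
 * that needs its header contiguous (illustrative sketch; struct ip is
 * just an example header):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;				// chain already freed
 *	ip = mtod(m, struct ip *);
 */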
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0;	/* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL && len +
				    max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
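/*
 * A driver receive path might use m_devget() roughly like this
 * (illustrative sketch only; buf, len and ifp stand in for driver
 * state):
 *
 *	m = m_devget(buf, len, 0, ifp, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 *	// hand the chain to the input path, e.g. ether_input()
 */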
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
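/*
 * m_copyback() is handy for patching a field in place, e.g. rewriting
 * a 2-byte checksum at a known offset (illustrative sketch; csum and
 * CSUM_OFF are hypothetical):
 *
 *	u_short csum = 0;
 *
 *	m_copyback(m, CSUM_OFF, sizeof(csum), (caddr_t)&csum);
 *
 * Note that if allocation of an extension mbuf fails, m_copyback()
 * quietly stops short, so the chain may be only partially updated.
 */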