uipc_mbuf.c revision 71089
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 71089 2001-01-16 01:53:13Z bmilekic $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;

/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;
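
/*
 * For reference: each of the three list headers above pairs a free list
 * head with the mutex that protects it.  A minimal sketch of the shape
 * the code below relies on (the authoritative definitions live in
 * <sys/mbuf.h>):
 *
 *	struct mbffree_lst {
 *		struct mbuf *m_head;	-- singly-linked free mbufs
 *		struct mtx m_mtx;	-- lock protecting m_head
 *	};
 *
 * mclfree and mcntfree follow the same pattern for mbuf clusters and
 * for external-buffer reference counters, respectively.
 */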

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);

static void	m_reclaim __P((void));

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS

/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *	machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(dummy)
	void *dummy;
{
	vm_offset_t maxaddr, mb_map_size;

	/*
	 * Setup the mb_map, allocate requested VM space.
	 */
	mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt
	    * sizeof(union mext_refcnt);
	mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers, and setup locks for lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
	mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
	mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

	/*
	 * Initialize mbuf subsystem (sysctl exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);

	mtx_enter(&mmbfree.m_mtx, MTX_DEF);
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_exit(&mmbfree.m_mtx, MTX_DEF);

	mtx_enter(&mclfree.m_mtx, MTX_DEF);
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_exit(&mclfree.m_mtx, MTX_DEF);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
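
/*
 * Worked example of the mb_map sizing above (illustrative only; the real
 * numbers depend on platform and kernel configuration): on i386 with
 * MAXUSERS = 32, NMBCLUSTERS defaults to 512 + 32 * 16 = 1024, so
 * nmbclusters = 1024 and nmbufs = 1024 * 4 = 4096.  With MSIZE = 256 and
 * MCLBYTES = 2048, the mbuf and cluster portions of the map come to
 * 4096 * 256 + 1024 * 2048 = 3 MB, before the nmbcnt reference counter
 * array is added and the total is rounded up to a page boundary.
 */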

/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(nmb, how)
	u_int nmb;
	int how;
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);
#ifdef WITNESS
	/*
	 * XXX: Make sure we don't create lock order problems.
	 * XXX: We'll grab Giant, but for that to be OK, make sure
	 * XXX: that either Giant is already held OR make sure that
	 * XXX: no other locks are held coming in.
	 * XXX: Revisit once most of the net stuff gets locks added.
	 */
	KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
	    ("m_alloc_ref: Giant must be owned or no locks held"));
#endif
	mtx_enter(&Giant, MTX_DEF);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		mtx_exit(&Giant, MTX_DEF);
		mtx_enter(&mcntfree.m_mtx, MTX_DEF);
		return (0);
	}
	mtx_exit(&Giant, MTX_DEF);
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}
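
/*
 * Note that "at least nmb" above is literal: the request is rounded up
 * to whole pages and then converted back into counters.  For example
 * (assuming a 4 KB page and a 4-byte union mext_refcnt, which is what a
 * pointer/u_long union comes to on a 32-bit platform), a request for
 * nmb = 256 rounds up to nbytes = round_page(1024) = 4096, and the
 * caller ends up with nmb = 4096 / 4 = 1024 counters on the free list.
 */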

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
		/*
		 * Needs to be atomic as we may be incrementing it
		 * while holding another mutex, like mclfree. In other
		 * words, m_drops is not reserved solely for mbufs,
		 * but is also available for clusters.
		 */
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	nbytes = round_page(nmb * MSIZE);

	mtx_exit(&mmbfree.m_mtx, MTX_DEF);
#ifdef WITNESS
	/*
	 * XXX: Make sure we don't create lock order problems.
	 * XXX: We'll grab Giant, but for that to be OK, make sure
	 * XXX: that either Giant is already held OR make sure that
	 * XXX: no other locks are held coming in.
	 * XXX: Revisit once most of the net stuff gets locks added.
	 */
	KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
	    ("m_mballoc: Giant must be owned or no locks held"));
#endif
	mtx_enter(&Giant, MTX_DEF);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_TRYWAIT) {
		atomic_add_long(&mbstat.m_wait, 1);
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}
	mtx_exit(&Giant, MTX_DEF);
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}

/*
 * Once the mb_map has been exhausted, if the call to the allocation macros
 * (or, in some cases, functions) was made with M_TRYWAIT, it is necessary
 * to rely solely on reclaimed mbufs.
 *
 * Here we ask the protocols to free up some resources and, if we
 * still cannot get anything, then we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines. Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_exit(&mmbfree.m_mtx, MTX_DEF);
	m_reclaim();

	mtx_enter(&mmbfree.m_mtx, MTX_DEF);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		m_mballoc_wid++;
		if (msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
		    mbuf_wait) == EWOULDBLOCK)
			m_mballoc_wid--;

		/*
		 * Try again (one last time).
		 *
		 * We retry to fetch _even_ if the sleep timed out. This
		 * is left this way, purposely, in the [unlikely] case
		 * that an mbuf was freed but the sleep was not awoken
		 * in time.
		 *
		 * If the sleep didn't time out (i.e. we got woken up) then
		 * we have the lock so we just grab an mbuf, hopefully.
		 */
		_MGET(p, M_DONTWAIT);
	}

	/* If we waited and got something... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}
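
/*
 * The protocol above is worth spelling out: m_mballoc_wid counts sleepers,
 * msleep(9) atomically releases the free list mutex while sleeping and
 * reacquires it before returning, and a successful waiter that still sees
 * mbufs on the list passes the wakeup along via MBWAKEUP so that a single
 * free can unblock a chain of waiters.  m_clalloc_wait() below follows
 * the same pattern for clusters.
 */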

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once the map is full, nothing will ever be freed back to it,
	 * so stop allocating.  Likewise, stop allocating from mb_map
	 * once we've hit the mcluster number limit.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	npg = ncl;
	mtx_exit(&mclfree.m_mtx, MTX_DEF);
#ifdef WITNESS
	/*
	 * XXX: Make sure we don't create lock order problems.
	 * XXX: We'll grab Giant, but for that to be OK, make sure
	 * XXX: that either Giant is already held OR make sure that
	 * XXX: no other locks are held coming in.
	 * XXX: Revisit once most of the net stuff gets locks added.
	 */
	KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
	    ("m_clalloc: Giant must be owned or no locks held"));
#endif
	mtx_enter(&Giant, MTX_DEF);
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	mtx_exit(&Giant, MTX_DEF);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	mtx_enter(&mclfree.m_mtx, MTX_DEF);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
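
/*
 * A quick sanity check on the arithmetic above: ncl is first reused as a
 * page count (npg), ctob() converts pages to bytes for kmem_malloc(), and
 * ncl is then recomputed as the number of clusters those pages hold.  On
 * a machine with PAGE_SIZE = 4096 and MCLBYTES = 2048 (e.g. i386), a
 * request for 2 "clusters" allocates 2 pages = 8192 bytes, which are then
 * carved into 2 * 4096 / 2048 = 4 free clusters.
 */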

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;

	m_clalloc_wid++;
	if (msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait)
	    == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) that we've got something, try again.
	 */
	_MCLALLOC(p, M_DONTWAIT);

	/* If we waited and got something ... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * m_reclaim: drain protocols in the hope of freeing up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks which raises the possibility of lock
 * order violation if we're holding any mutex that is acquired in
 * reverse order relative to one of the locks in the drain routines.
 */
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(CURPROC) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check non-first mbufs, since some existing
		 * code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
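
/*
 * Typical use of this path (a sketch only; M_PREPEND itself expands to
 * the fast inline case and only falls back to m_prepend() when there is
 * no leading space in the first mbuf):
 *
 *	M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
 *	if (m == NULL)
 *		return;		-- chain was freed on failure
 *
 * Both M_PREPEND and m_prepend() free the original chain when no mbuf
 * can be allocated, so the caller must always test the result.
 */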

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		atomic_add_long(&MCFail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (0);
}
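
/*
 * Example caller (a sketch of the common bpf/loopback pattern, not code
 * from this file): taking a read-only snapshot of an entire packet
 * before handing the original on.
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n != NULL) {
 *		(use n for the tap/copy; m_freem(n) when done)
 *	}
 *
 * Because cluster data is shared by reference, neither chain may be
 * written through while the other is still alive.
 */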

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
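
/*
 * Example use of m_copydata() (sketch): extracting a header that may be
 * scattered across several mbufs into a contiguous local copy, without
 * modifying the chain the way m_pullup() would.
 *
 *	struct ip iph;
 *
 *	m_copydata(m, 0, sizeof(iph), (caddr_t)&iph);
 *
 * The caller must know that the chain holds at least "off + len" bytes;
 * the KASSERTs above only catch violations in debugging kernels.
 */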

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
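
/*
 * m_adj() usage sketch: a positive count trims from the front of the
 * chain, a negative count from the end.  For instance, after input
 * processing one might strip a link-level header and a trailing 4-byte
 * frame check sequence:
 *
 *	m_adj(m, sizeof(struct ether_header));	-- drop leading header
 *	m_adj(m, -ETHER_CRC_LEN);		-- drop trailing CRC
 *
 * (ETHER_CRC_LEN is used here just as a stand-in for "4".)
 */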

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long(&MPFail, 1);
	return (0);
}
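
/*
 * The canonical m_pullup() idiom from the protocol input paths (a sketch,
 * not code from this file): make sure the fixed header can be accessed
 * through mtod() before touching it, remembering that the old chain
 * pointer is dead after the call.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0) {
 *		(chain already freed; count the error and return)
 *	}
 *	ip = mtod(m, struct ip *);
 */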

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}