uipc_mbuf.c revision 66475
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 66475 2000-09-30 06:30:39Z bmilekic $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/mutex.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long  mbtypes[MT_NTYPES];
int     max_linkhdr;
int     max_protohdr;
int     max_hdr;
int     max_datalen;
int     nmbclusters;
int     nmbufs;
u_long  m_mballoc_wid = 0;
u_long  m_clalloc_wid = 0;

/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS     (512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);

static void     m_reclaim __P((void));

/* Initial allocation numbers */
#define NCL_INIT        2
#define NMB_INIT        16
#define REF_INIT        NMBCLUSTERS

/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 * machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(dummy)
        void *dummy;
{
        vm_offset_t maxaddr, mb_map_size;

        /*
         * Setup the mb_map, allocate requested VM space.
         */
        mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + EXT_COUNTERS
            * sizeof(union mext_refcnt);
        mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
        mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
            mb_map_size);
        /* XXX: mb_map->system_map = 1; */

        /*
         * Initialize the free list headers, and setup locks for lists.
         */
        mmbfree.m_head = NULL;
        mclfree.m_head = NULL;
        mcntfree.m_head = NULL;
        mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
        mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
        mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

        /*
         * Initialize mbuf subsystem (sysctl exported) statistics structure.
         */
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        /*
         * Perform some initial allocations.
         */
        mtx_enter(&mcntfree.m_mtx, MTX_DEF);
        if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
                goto bad;
        mtx_exit(&mcntfree.m_mtx, MTX_DEF);

        mtx_enter(&mmbfree.m_mtx, MTX_DEF);
        if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
                goto bad;
        mtx_exit(&mmbfree.m_mtx, MTX_DEF);

        mtx_enter(&mclfree.m_mtx, MTX_DEF);
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
                goto bad;
        mtx_exit(&mclfree.m_mtx, MTX_DEF);

        return;
bad:
        panic("mbinit: failed to initialize mbuf subsystem!");
}
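
/*
 * A rough worked example of the mb_map sizing above, assuming i386-era
 * defaults (MSIZE = 256, MCLBYTES = 2048, MAXUSERS = 32); the figures
 * are illustrative only, not normative:
 *
 *      nmbclusters = 512 + 32 * 16 = 1024
 *      nmbufs      = 4 * nmbclusters = 4096
 *      mb_map_size = 4096 * 256        (mbuf space,    1 MB)
 *                  + 1024 * 2048       (cluster space, 2 MB)
 *                  + EXT_COUNTERS * sizeof(union mext_refcnt)
 *      ...then rounded up to a multiple of PAGE_SIZE.
 */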

/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(nmb, how)
        u_int nmb;
        int how;
{
        caddr_t p;
        u_int nbytes;
        int i;

        /*
         * We don't cap the amount of memory that can be used
         * by the reference counters, like we do for mbufs and
         * mbuf clusters. In fact, we're absolutely sure that we
         * won't ever be going over our allocated space. We keep enough
         * space in mb_map to accommodate maximum values of allocatable
         * external buffers including, but not limited to, clusters.
         * (That's also why we won't have to have wait routines for
         * counters).
         *
         * If we're in here, we're absolutely certain to be returning
         * successfully, as long as there is physical memory to accommodate
         * us. And if there isn't, but we're willing to wait, then
         * kmem_malloc() will do the only waiting needed.
         */

        nbytes = round_page(nmb * sizeof(union mext_refcnt));
        mtx_exit(&mcntfree.m_mtx, MTX_DEF);
        mtx_enter(&Giant, MTX_DEF);
        if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_WAIT ? M_WAIT :
            M_NOWAIT)) == NULL) {
                mtx_exit(&Giant, MTX_DEF);
                mtx_enter(&mcntfree.m_mtx, MTX_DEF);    /* XXX: We must be
                                                           holding it going
                                                           out. */
                return (0);
        }
        mtx_exit(&Giant, MTX_DEF);
        nmb = nbytes / sizeof(union mext_refcnt);

        /*
         * We don't let go of the mutex in order to avoid a race.
         * It is up to the caller to let go of the mutex.
         */
        mtx_enter(&mcntfree.m_mtx, MTX_DEF);
        for (i = 0; i < nmb; i++) {
                ((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
                mcntfree.m_head = (union mext_refcnt *)p;
                p += sizeof(union mext_refcnt);
                mbstat.m_refree++;
        }
        mbstat.m_refcnt += nmb;

        return (1);
}
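
/*
 * Callers are expected to hold the counter free list lock across the
 * call, exactly as mbinit() does above.  A minimal sketch of the
 * expected calling pattern:
 *
 *      mtx_enter(&mcntfree.m_mtx, MTX_DEF);
 *      if (m_alloc_ref(nmb, how) == 0)
 *              ...handle allocation failure...
 *      mtx_exit(&mcntfree.m_mtx, MTX_DEF);
 */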

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(nmb, how)
        register int nmb;
        int how;
{
        register caddr_t p;
        register int i;
        int nbytes;

        /*
         * If we've hit the mbuf limit, stop allocating from mb_map.
         * Also, once we run out of map space, it will be impossible to
         * get any more (nothing is ever freed back to the map).
         */
        if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
                /*
                 * Needs to be atomic as we may be incrementing it
                 * while holding another mutex, like mclfree. In other
                 * words, m_drops is not reserved solely for mbufs,
                 * but is also available for clusters.
                 */
                atomic_add_long(&mbstat.m_drops, 1);
                return (0);
        }

        nbytes = round_page(nmb * MSIZE);

        /* XXX: The letting go of the mmbfree lock here may eventually
           be moved to only be done for M_WAIT calls to kmem_malloc() */
        mtx_exit(&mmbfree.m_mtx, MTX_DEF);
        mtx_enter(&Giant, MTX_DEF);
        p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
        if (p == 0 && how == M_WAIT) {
                atomic_add_long(&mbstat.m_wait, 1);
                p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
        }
        mtx_exit(&Giant, MTX_DEF);
        mtx_enter(&mmbfree.m_mtx, MTX_DEF);

        /*
         * Either the map is now full, or `how' is M_DONTWAIT and there
         * are no pages left.
         */
        if (p == NULL)
                return (0);

        nmb = nbytes / MSIZE;

        /*
         * We don't let go of the mutex in order to avoid a race.
         * It is up to the caller to let go of the mutex when done
         * with grabbing the mbuf from the free list.
         */
        for (i = 0; i < nmb; i++) {
                ((struct mbuf *)p)->m_next = mmbfree.m_head;
                mmbfree.m_head = (struct mbuf *)p;
                p += MSIZE;
        }
        mbstat.m_mbufs += nmb;
        mbtypes[MT_FREE] += nmb;
        return (1);
}
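
/*
 * m_mballoc() is not normally called directly; the MGET()/MGETHDR()
 * allocation macros fall back to it when the mbuf free list is found
 * empty.  A simplified sketch of that fallback (macro internals
 * elided):
 *
 *      mtx_enter(&mmbfree.m_mtx, MTX_DEF);
 *      if (mmbfree.m_head == NULL)
 *              (void)m_mballoc(1, how);
 *      ...dequeue from mmbfree.m_head, or m_mballoc_wait() if M_WAIT...
 *      mtx_exit(&mmbfree.m_mtx, MTX_DEF);
 */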

/*
 * Once mb_map has been exhausted and the allocation macros (or, in some
 * cases, functions) are called with M_WAIT, we must rely solely on
 * reclaimed mbufs.
 *
 * Here we ask the protocols to free up some resources and, if we
 * still cannot get anything, wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held, and we will probably end
 * up recursing into that lock from some of the drain routines, but
 * this should be okay, as long as we don't block there, or attempt
 * to allocate from them (theoretically impossible).
 */
struct mbuf *
m_mballoc_wait(void)
{
        struct mbuf *p = NULL;

        /*
         * See if we can drain some resources out of the protocols.
         */
        m_reclaim();
        _MGET(p, M_DONTWAIT);

        if (p == NULL) {
                m_mballoc_wid++;
                if (msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
                    mbuf_wait) == EWOULDBLOCK)
                        m_mballoc_wid--;

                /*
                 * Try again (one last time).
                 *
                 * We retry to fetch _even_ if the sleep timed out. This
                 * is left this way, purposely, in the [unlikely] case
                 * that an mbuf was freed but the sleep was not awoken
                 * in time.
                 *
                 * If the sleep didn't time out (i.e. we got woken up) then
                 * we have the lock so we just grab an mbuf, hopefully.
                 */
                _MGET(p, M_DONTWAIT);
        }

        /* If we waited and got something... */
        if (p != NULL) {
                atomic_add_long(&mbstat.m_wait, 1);
                if (mmbfree.m_head != NULL)
                        MBWAKEUP(m_mballoc_wid);
        } else
                atomic_add_long(&mbstat.m_drops, 1);

        return (p);
}
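
/*
 * The msleep() above depends on the freeing side issuing
 * MBWAKEUP(m_mballoc_wid) once an mbuf goes back on the free list,
 * just as m_mballoc_wait() itself wakes any remaining sleepers on
 * success.  A sketch of the freeing side (the actual wakeup lives in
 * the free macros):
 *
 *      ...link the mbuf back onto mmbfree.m_head...
 *      MBWAKEUP(m_mballoc_wid);
 */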

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(ncl, how)
        register int ncl;
        int how;
{
        register caddr_t p;
        register int i;
        int npg;

        /*
         * Once the map is full, nothing will ever be freed back to it.
         * If we've hit the mcluster number limit, stop allocating from
         * mb_map.
         */
        if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
                atomic_add_long(&mbstat.m_drops, 1);
                return (0);
        }

        npg = ncl;
        mtx_exit(&mclfree.m_mtx, MTX_DEF);
        mtx_enter(&Giant, MTX_DEF);
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
            how != M_WAIT ? M_NOWAIT : M_WAITOK);
        mtx_exit(&Giant, MTX_DEF);
        ncl = ncl * PAGE_SIZE / MCLBYTES;
        mtx_enter(&mclfree.m_mtx, MTX_DEF);

        /*
         * Either the map is now full, or `how' is M_DONTWAIT and there
         * are no pages left.
         */
        if (p == NULL) {
                atomic_add_long(&mbstat.m_drops, 1);
                return (0);
        }

        /*
         * We don't let go of the mutex in order to avoid a race.
         */
        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree.m_head;
                mclfree.m_head = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;
        return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
        caddr_t p = NULL;

        m_clalloc_wid++;
        if (msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait)
            == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) we've got something, try again.
         */
        _MCLALLOC(p, M_DONTWAIT);

        /* If we waited and got something ... */
        if (p != NULL) {
                atomic_add_long(&mbstat.m_wait, 1);
                if (mclfree.m_head != NULL)
                        MBWAKEUP(m_clalloc_wid);
        } else
                atomic_add_long(&mbstat.m_drops, 1);

        return (p);
}
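
/*
 * As with mbufs, clusters are normally obtained through the MCLGET()
 * macro rather than by calling m_clalloc() directly; the usual pattern,
 * also visible in m_dup() and m_devget() below, is:
 *
 *      MGET(m, how, MT_DATA);
 *      if (m != NULL) {
 *              MCLGET(m, how);
 *              if ((m->m_flags & M_EXT) == 0)
 *                      (void)m_free(m);        -- no cluster was attached
 *      }
 */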

/*
 * m_reclaim: drain protocols in hopes of freeing up some resources...
 *
 * Should be called with mmbfree.m_mtx mutex held. We will most likely
 * recursively grab it from within some drain routines, but that's okay,
 * as the mutex will never be completely released until we let go of it
 * after our m_reclaim() is over.
 *
 * Note: Drain routines are only allowed to free mbufs (and mclusters,
 *       as a consequence, if need be). They are not allowed to allocate
 *       new ones (that would defeat the purpose, anyway).
 */
static void
m_reclaim()
{
        register struct domain *dp;
        register struct protosw *pr;

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
        mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        return (m);
}

struct mbuf *
m_gethdr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGETHDR(m, how, type);
        return (m);
}

struct mbuf *
m_getclr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

struct mbuf *
m_free(m)
        struct mbuf *m;
{
        register struct mbuf *n;

        MFREE(m, n);
        return (n);
}

void
m_freem(m)
        register struct mbuf *m;
{
        register struct mbuf *n;

        if (m == NULL)
                return;
        do {
                /*
                 * We do need to check non-first mbufs, since some existing
                 * code does not call M_PREPEND properly.
                 * (example: call to bpf_mtap from drivers)
                 */
                if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
                        m_freem(m->m_pkthdr.aux);
                        m->m_pkthdr.aux = NULL;
                }
                MFREE(m, n);
                m = n;
        } while (m);
}
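
/*
 * A minimal allocation/free round trip using the function forms above
 * (the macro forms are preferred on critical paths); sketch only:
 *
 *      struct mbuf *m;
 *
 *      m = m_gethdr(M_DONTWAIT, MT_DATA);
 *      if (m == NULL)
 *              return (ENOBUFS);
 *      ...fill in m->m_pkthdr and the data area...
 *      m_freem(m);
 */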

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
        register struct mbuf *m;
        int len, how;
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR) {
                M_COPY_PKTHDR(mn, m);
                m->m_flags &= ~M_PKTHDR;
        }
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail  (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        M_COPY_PKTHDR(n, m);
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                        MEXT_ADD_REF(m);
                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                atomic_add_long(&MCFail, 1);
        return (top);
nospace:
        m_freem(top);
        atomic_add_long(&MCFail, 1);
        return (0);
}
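
/*
 * Because m_copym() shares clusters instead of copying them, the
 * resulting chain must be treated as read-only; callers that intend to
 * modify the data want m_dup() below instead.  Sketch:
 *
 *      n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);  -- shared, read-only
 *      n = m_dup(m, M_DONTWAIT);                  -- private, writable
 */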

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);
        top = n;
        if (!n)
                goto nospace;

        M_COPY_PKTHDR(n, m);
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                n->m_ext = m->m_ext;
                n->m_flags |= M_EXT;
                MEXT_ADD_REF(m);
        } else {
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                MGET(o, how, m->m_type);
                if (!o)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                        MEXT_ADD_REF(m);
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        atomic_add_long(&MCFail, 1);
        return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
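
/*
 * m_copydata() is the usual way to pull part of a chain into local
 * storage without disturbing the chain; for example (sketch, assuming
 * the chain holds at least a full IP header):
 *
 *      struct ip iphdr;
 *
 *      m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */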

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (0);
        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);
                if (n == NULL)
                        goto nospace;
                if (top == NULL) {              /* first one, must be PKTHDR */
                        M_COPY_PKTHDR(n, m);
                        nsize = MHLEN;
                } else                          /* not the first one */
                        nsize = MLEN;
                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {
                                (void)m_free(n);
                                goto nospace;
                        }
                        nsize = MCLBYTES;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __FUNCTION__));
        }
        return (top);

nospace:
        m_freem(top);
        atomic_add_long(&MCFail, 1);
        return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
        register struct mbuf *m, *n;
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

void
m_adj(mp, req_len)
        struct mbuf *mp;
        int req_len;
{
        register int len = req_len;
        register struct mbuf *m;
        register int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}
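
/*
 * m_adj() trims from the head of the chain for positive req_len and
 * from the tail for negative req_len; for example (sketch, constants
 * from net/ethernet.h):
 *
 *      m_adj(m, ETHER_HDR_LEN);        -- strip a leading ethernet header
 *      m_adj(m, -ETHER_CRC_LEN);       -- strip a trailing CRC
 */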

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail  (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        register int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, M_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR) {
                        M_COPY_PKTHDR(m, n);
                        n->m_flags &= ~M_PKTHDR;
                }
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        atomic_add_long(&MPFail, 1);
        return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else
                                return (n);
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                MEXT_ADD_REF(m);
                m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
                n->m_data = m->m_data + len;
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
        char *buf;
        int totlen, off0;
        struct ifnet *ifp;
        void (*copy) __P((char *from, caddr_t to, u_int len));
{
        register struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        register int off = off0, len;
        register char *cp;
        char *epkt;

        cp = buf;
        epkt = cp + totlen;
        if (off) {
                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
        }
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == 0)
                return (0);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                return (0);
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);
                        else
                                len = m->m_len;
                } else {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;
                                m->m_len = len;
                        } else
                                len = m->m_len;
                }
                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = buf;
        }
        return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
        struct mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register int mlen;
        register struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min (m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;
        m2 = m;
        while (len) {
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        return;
}
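
/*
 * The classic m_pullup() idiom from protocol input paths, shown here as
 * an illustrative sketch only: guarantee that the first mbuf holds a
 * full IP header before casting m_data.
 *
 *      if (m->m_len < sizeof(struct ip) &&
 *          (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *              return;                 -- m_pullup freed the chain
 *      ip = mtod(m, struct ip *);
 */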