uipc_mbuf.c revision 72356
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 72356 2001-02-11 05:02:06Z bmilekic $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;
/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	   sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
	   "Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);

static void	m_reclaim(void);

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS

/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *      machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(void *dummy)
{
	vm_offset_t maxaddr, mb_map_size;

	/*
	 * Setup the mb_map, allocate requested VM space.
	 */
	mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt
	    * sizeof(union mext_refcnt);
	mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX XXX XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers, and setup locks for lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
	mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
	mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

	/*
	 * Initialize mbuf subsystem (sysctl exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_lock(&mcntfree.m_mtx);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mcntfree.m_mtx);

	mtx_lock(&mmbfree.m_mtx);
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mmbfree.m_mtx);

	mtx_lock(&mclfree.m_mtx);
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mclfree.m_mtx);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
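/*
 * Illustrative note (not part of the original file): the TUNABLE_INT_DECL()
 * entries above mean the pool ceilings can be set from the loader before
 * the kernel boots, e.g. in /boot/loader.conf (values here are examples):
 *
 *	kern.ipc.nmbclusters="8192"
 *	kern.ipc.nmbufs="32768"
 *
 * Afterwards they are read-only sysctls (CTLFLAG_RD), since mb_map is
 * sized exactly once, in mbinit() above, and is never grown.
 */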
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(u_int nmb, int how)
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	mtx_unlock(&mcntfree.m_mtx);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		mtx_lock(&mcntfree.m_mtx);
		return (0);
	}
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	mtx_lock(&mcntfree.m_mtx);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
		/*
		 * Needs to be atomic as we may be incrementing it
		 * while holding another mutex, like mclfree. In other
		 * words, m_drops is not reserved solely for mbufs,
		 * but is also available for clusters.
		 */
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	nbytes = round_page(nmb * MSIZE);

	mtx_unlock(&mmbfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == NULL && how == M_TRYWAIT) {
		atomic_add_long(&mbstat.m_wait, 1);
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}
	mtx_lock(&mmbfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
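/*
 * Illustrative sketch (not part of the original file): the caller pattern
 * the allocators above expect, roughly what the _MGET() macro in
 * <sys/mbuf.h> does (stats and type bookkeeping omitted).  Lock the free
 * list, replenish it if empty, pop the head, and only then unlock:
 *
 *	struct mbuf *m = NULL;
 *
 *	mtx_lock(&mmbfree.m_mtx);
 *	if (mmbfree.m_head == NULL)
 *		(void)m_mballoc(1, M_DONTWAIT);
 *	if ((m = mmbfree.m_head) != NULL)
 *		mmbfree.m_head = m->m_next;
 *	mtx_unlock(&mmbfree.m_mtx);
 *
 * Holding the lock across the pop is what the "we don't let go of the
 * mutex" comments above rely on.
 */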
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_TRYWAIT, then it is necessary to
 * rely solely on reclaimed mbufs.
 *
 * Here we ask the protocols to free up some resources and, if we still
 * cannot get anything, then we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines. Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_unlock(&mmbfree.m_mtx);
	m_reclaim();

	mtx_lock(&mmbfree.m_mtx);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		m_mballoc_wid++;
		msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
		    mbuf_wait);
		m_mballoc_wid--;

		/*
		 * Try again (one last time).
		 *
		 * We retry to fetch _even_ if the sleep timed out. This
		 * is left this way, purposely, in the [unlikely] case
		 * that an mbuf was freed but the sleep was not awoken
		 * in time.
		 *
		 * If the sleep didn't time out (i.e. we got woken up) then
		 * we have the lock so we just grab an mbuf, hopefully.
		 */
		_MGET(p, M_DONTWAIT);
	}

	/* If we waited and got something... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * Once the map is full, nothing will ever be freed back to it,
	 * so stop allocating. Likewise, stop allocating from mb_map once
	 * we've hit the mcluster number limit.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	npg = ncl;
	mtx_unlock(&mclfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	mtx_lock(&mclfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
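/*
 * Worked example (illustrative, not part of the original file): the
 * recomputation of `ncl' above converts "pages requested" back into
 * "clusters obtained".  m_clalloc() treats its argument as a page count
 * (npg = ncl, allocated via ctob(npg)), so with PAGE_SIZE = 4096 and
 * MCLBYTES = 2048 a call with ncl = 2 maps two pages and links
 * 2 * 4096 / 2048 = 4 clusters onto the free list.
 */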
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;

	m_clalloc_wid++;
	msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait);
	m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, try again.
	 */
	_MCLALLOC(p, M_DONTWAIT);

	/* If we waited and got something ... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * m_reclaim: drain protocols in hopes to free up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks which raises the possibility of lock
 * order violation if we're holding any mutex that is acquired in reverse
 * order relative to one of the locks in the drain routines.
 */
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(CURPROC) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == NULL)
		return (NULL);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check non-first mbufs, since some existing
		 * code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
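/*
 * Illustrative usage (not part of the original file): m_prepend() is
 * normally reached through the M_PREPEND() macro, which takes the fast
 * path (just moving m_data back) when leading space is available, as
 * when a network driver adds its link-layer header:
 *
 *	struct ether_header *eh;
 *
 *	M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
 *	if (m == NULL)
 *		return;
 *	eh = mtod(m, struct ether_header *);
 *
 * Note that, as in m_prepend() above, failure consumes the chain, so the
 * caller must not touch the old pointer.
 */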
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long(&MCFail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}
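/*
 * Illustrative usage (not part of the original file): the read-only
 * nature of these copies is what makes them cheap. TCP output, for
 * example, keeps data on the socket buffer and transmits reference-
 * counted copies, along the lines of:
 *
 *	n = m_copym(so->so_snd.sb_mb, off, len, M_DONTWAIT);
 *
 * Any cluster in the original is shared rather than duplicated, so the
 * copy must not be written to; use m_dup() below when a writable copy
 * is needed.
 */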
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
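 *
 * Illustrative usage (not part of the original file, continuing past the
 * m_adj() trim logic below): m_copydata() above is the safe way to read a
 * structure that may straddle mbuf boundaries, at the cost of a copy.
 * For example, extracting an IP header from a received chain regardless
 * of how it is fragmented across mbufs:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 *
 * Contrast with m_pullup() further below, which instead makes the bytes
 * contiguous in the chain so they can be accessed in place via mtod().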
822 */ 823 while (m != NULL && len > 0) { 824 if (m->m_len <= len) { 825 len -= m->m_len; 826 m->m_len = 0; 827 m = m->m_next; 828 } else { 829 m->m_len -= len; 830 m->m_data += len; 831 len = 0; 832 } 833 } 834 m = mp; 835 if (mp->m_flags & M_PKTHDR) 836 m->m_pkthdr.len -= (req_len - len); 837 } else { 838 /* 839 * Trim from tail. Scan the mbuf chain, 840 * calculating its length and finding the last mbuf. 841 * If the adjustment only affects this mbuf, then just 842 * adjust and return. Otherwise, rescan and truncate 843 * after the remaining size. 844 */ 845 len = -len; 846 count = 0; 847 for (;;) { 848 count += m->m_len; 849 if (m->m_next == (struct mbuf *)0) 850 break; 851 m = m->m_next; 852 } 853 if (m->m_len >= len) { 854 m->m_len -= len; 855 if (mp->m_flags & M_PKTHDR) 856 mp->m_pkthdr.len -= len; 857 return; 858 } 859 count -= len; 860 if (count < 0) 861 count = 0; 862 /* 863 * Correct length for chain is "count". 864 * Find the mbuf with last data, adjust its length, 865 * and toss data from remaining mbufs on chain. 866 */ 867 m = mp; 868 if (m->m_flags & M_PKTHDR) 869 m->m_pkthdr.len = count; 870 for (; m; m = m->m_next) { 871 if (m->m_len >= count) { 872 m->m_len = count; 873 break; 874 } 875 count -= m->m_len; 876 } 877 while (m->m_next) 878 (m = m->m_next) ->m_len = 0; 879 } 880} 881 882/* 883 * Rearange an mbuf chain so that len bytes are contiguous 884 * and in the data area of an mbuf (so that mtod and dtom 885 * will work for a structure of size len). Returns the resulting 886 * mbuf chain on success, frees it and returns null on failure. 887 * If there is room, it will add up to max_protohdr-len extra bytes to the 888 * contiguous region in an attempt to avoid being called next time. 889 */ 890#define MPFail (mbstat.m_mpfail) 891 892struct mbuf * 893m_pullup(struct mbuf *n, int len) 894{ 895 struct mbuf *m; 896 int count; 897 int space; 898 899 /* 900 * If first mbuf has no cluster, and has room for len bytes 901 * without shifting current data, pullup into it, 902 * otherwise allocate a new mbuf to prepend to the chain. 903 */ 904 if ((n->m_flags & M_EXT) == 0 && 905 n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 906 if (n->m_len >= len) 907 return (n); 908 m = n; 909 n = n->m_next; 910 len -= m->m_len; 911 } else { 912 if (len > MHLEN) 913 goto bad; 914 MGET(m, M_DONTWAIT, n->m_type); 915 if (m == NULL) 916 goto bad; 917 m->m_len = 0; 918 if (n->m_flags & M_PKTHDR) { 919 M_COPY_PKTHDR(m, n); 920 n->m_flags &= ~M_PKTHDR; 921 } 922 } 923 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 924 do { 925 count = min(min(max(len, max_protohdr), space), n->m_len); 926 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 927 (unsigned)count); 928 len -= count; 929 m->m_len += count; 930 n->m_len -= count; 931 space -= count; 932 if (n->m_len) 933 n->m_data += count; 934 else 935 n = m_free(n); 936 } while (len > 0 && n); 937 if (len > 0) { 938 (void) m_free(m); 939 goto bad; 940 } 941 m->m_next = n; 942 return (m); 943bad: 944 m_freem(n); 945 atomic_add_long(&MPFail, 1); 946 return (NULL); 947} 948 949/* 950 * Partition an mbuf chain in two pieces, returning the tail -- 951 * all but the first len0 bytes. In case of failure, it returns NULL and 952 * attempts to restore the chain to its original state. 
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL && len +
				    max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
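/*
 * Illustrative usage (not part of the original file): m_devget() is
 * intended for network drivers whose receive buffers are not mbufs,
 * e.g. board-local memory. A receive handler might build a chain with
 * something like the following, where sc->rx_buf and pktlen are
 * hypothetical driver state and the NULL copy argument selects plain
 * bcopy():
 *
 *	m = m_devget(sc->rx_buf, pktlen, 0, ifp, NULL);
 *	if (m == NULL)
 *		return;
 */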
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
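/*
 * Illustrative usage (not part of the original file): m_copyback() above
 * is the write-side counterpart of m_copydata(), e.g. for overwriting a
 * previously extracted and modified header in place:
 *
 *	m_copyback(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 *
 * Note its quiet failure mode: if it cannot allocate mbufs to extend the
 * chain (it uses M_DONTWAIT), it copies as much as fits and returns, so
 * callers needing a guarantee must validate the chain length first.
 */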