uipc_mbuf.c revision 64837
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 64837 2000-08-19 08:32:59Z dwmalone $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
union mext_refcnt *mext_refcnt_free;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
u_int	m_mballoc_wid = 0;
u_int	m_clalloc_wid = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	   sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);

static void	m_reclaim __P((void));

#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	(NMBCLUSTERS * 2)

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mext_refcnt_free = NULL;

	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_alloc_ref(REF_INIT) == 0)
		goto bad;
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
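
/*
 * Example (illustrative sketch): both limits above are boot-time loader
 * tunables, so a hypothetical /boot/loader.conf entry such as
 *
 *	kern.ipc.nmbclusters="4096"
 *
 * takes effect before mbinit() runs.  The read-only sysctls export the
 * same values, plus the statistics, to userland; a monitoring program
 * might read them with sysctl(3):
 *
 *	struct mbstat mbs;
 *	size_t len = sizeof(mbs);
 *
 *	if (sysctlbyname("kern.ipc.mbstat", &mbs, &len, NULL, 0) == 0)
 *		printf("%lu mbufs, %lu clusters\n",
 *		    (u_long)mbs.m_mbufs, (u_long)mbs.m_clusters);
 */
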
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 * Must be called at splimp.
 */
int
m_alloc_ref(nmb)
	u_int nmb;
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * XXX:
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. The reason is that we don't really expect
	 * to have to be allocating too many of these guys with m_alloc_ref(),
	 * and if we are, we're probably not out of the woods anyway,
	 * so leave this way for now.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT)) == NULL)
		return (0);
	nmb = nbytes / sizeof(union mext_refcnt);

	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mext_refcnt_free;
		mext_refcnt_free = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map,
	 * (or trying to) in order to avoid dipping into the section of
	 * mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map)
	 * -- however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs (when
	 * we're calling with M_WAIT the sleep procedure will be woken
	 * up when an mbuf is freed. See m_mballoc_wait()).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
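
/*
 * Worked example (assuming the common i386 values PAGE_SIZE = 4096 and
 * MSIZE = 256): a request for a single mbuf rounds up to one page,
 * round_page(1 * 256) = 4096, and the loop above then threads
 * 4096 / 256 = 16 mbufs onto mmbfree.  That is also why NMB_INIT is 16:
 * the initial allocation in mbinit() is exactly one page's worth.
 */
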
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;

	/*
	 * Now that we think we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, M_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, M_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map, (or trying to) in order to avoid dipping into the section
	 * of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters) {
		mbstat.m_drops++;
		return (0);
	}

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map). From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
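
/*
 * Example (sketch): protocols and drivers do not call m_clalloc()
 * directly; they attach a cluster to an mbuf with the MCLGET() macro
 * and check for the M_EXT flag, falling back gracefully on failure --
 * the same pattern m_dup() uses later in this file:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		MCLGET(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			m = NULL;
 *		}
 *	}
 */
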
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

#ifdef __i386__
	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
#endif

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we think we've got something, we will redo an
	 * _MCLALLOC, but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = NULL;
	_MCLALLOC(p, M_DONTWAIT);

	s = splimp();
	if (p != NULL) {	/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}

	splx(s);
	return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
#ifdef __i386__
		KASSERT(intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
		m_reclaim();
	}

	/*
	 * Both m_mballoc_wait and m_retry must be nulled because
	 * when the MGET macro is run from here, we definitely do _not_
	 * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
	 */
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait

	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;

	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
#ifdef __i386__
		KASSERT(intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
		m_reclaim();
	}

#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait

	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;

	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check the non-first mbufs too, since some
		 * existing code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}
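
/*
 * Example (sketch): the canonical allocate/use/free cycle with the
 * routines above.  Note that even M_WAIT can fail (it sleeps for at
 * most mbuf_wait ticks), so callers must always check for NULL:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_WAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_pkthdr.len = m->m_len = 0;
 *	...fill in data...
 *	m_freem(m);
 */
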
/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
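
/*
 * Example (sketch): because m_copym() shares clusters by bumping their
 * reference counts instead of copying the data, it is cheap enough for
 * paths such as TCP retransmission that need a throwaway copy of a
 * prefix of a chain.  The copy must be treated as read-only; use
 * m_dup() below when a writable copy is required:
 *
 *	n = m_copym(m, 0, len, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */
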
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
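
/*
 * Example (sketch): m_copydata() is the safe way to read a structure
 * that may straddle mbuf boundaries without modifying the chain, e.g.
 * peeking at a transport header at a known offset (iphlen here is a
 * hypothetical offset computed by the caller):
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, iphlen, sizeof(th), (caddr_t)&th);
 */
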
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
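
/*
 * Example (sketch): IP reassembly is the classic m_cat() consumer.
 * Once overlap between two adjacent fragments has been trimmed with
 * m_adj() (defined below), the chains are simply glued together; since
 * m_cat() does not touch m_pkthdr, the caller must fix up the packet
 * header length afterwards:
 *
 *	m_adj(frag, overlap);
 *	m_cat(prev, frag);
 */
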
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
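
/*
 * Example (sketch): the canonical m_pullup() idiom from protocol input
 * routines -- make sure a fixed-size header is contiguous before using
 * mtod().  Note that on failure the chain has already been freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */
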
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
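
/*
 * Example (sketch): m_split() suits fragmentation-style code; splitting
 * a chain at mtu bytes leaves the head in m and returns the tail, and
 * on failure m is left intact (mtu here is a hypothetical caller
 * variable):
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, mtu, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */
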
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
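
/*
 * Example (sketch): because m_copyback() extends the chain on demand,
 * it is convenient for patching a field at a known offset, e.g. storing
 * a freshly computed checksum (off is a hypothetical caller-supplied
 * offset):
 *
 *	u_short sum;
 *
 *	sum = in_cksum(m, m->m_pkthdr.len);
 *	m_copyback(m, off, sizeof(sum), (caddr_t)&sum);
 */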