uipc_mbuf.c revision 65095
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_mbuf.c 65095 2000-08-25 22:28:08Z peter $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long  mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
union mext_refcnt *mext_refcnt_free;
int     max_linkhdr;
int     max_protohdr;
int     max_hdr;
int     max_datalen;
int     nmbclusters;
int     nmbufs;
u_int   m_mballoc_wid = 0;
u_int   m_clalloc_wid = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
           &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
           &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
           &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
           &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
           sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
           &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
           "Maximum number of mbufs available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS     (512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);

static void     m_reclaim __P((void));

#define NCL_INIT        2
#define NMB_INIT        16
#define REF_INIT        (NMBCLUSTERS * 2)

/* ARGSUSED*/
static void
mbinit(dummy)
        void *dummy;
{
        int s;

        mmbfree = NULL;
        mclfree = NULL;
        mext_refcnt_free = NULL;

        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        s = splimp();
        if (m_alloc_ref(REF_INIT) == 0)
                goto bad;
        if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
                goto bad;
#if MCLBYTES <= PAGE_SIZE
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
                goto bad;
#else
        /* It's OK to call contigmalloc in this context. */
        if (m_clalloc(16, M_WAIT) == 0)
                goto bad;
#endif
        splx(s);
        return;
bad:
        panic("mbinit: failed to initialize mbuf subsystem!");
}
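/*
 * Editor's note (not part of the original file): with the historic
 * default of MAXUSERS == 32, the tunables above work out to
 * nmbclusters == 512 + 32 * 16 == 1024 and nmbufs == 4 * 1024 == 4096.
 * Both sysctls are read-only at run time (CTLFLAG_RD), but the values
 * may be overridden from the boot loader environment, e.g.
 * "set kern.ipc.nmbclusters=2048" at the loader prompt.
 */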
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 * Must be called at splimp.
 */
int
m_alloc_ref(nmb)
        u_int nmb;
{
        caddr_t p;
        u_int nbytes;
        int i;

        /*
         * XXX:
         * We don't cap the amount of memory that can be used
         * by the reference counters, like we do for mbufs and
         * mbuf clusters. The reason is that we don't really expect
         * to have to be allocating too many of these guys with m_alloc_ref(),
         * and if we are, we're probably not out of the woods anyway,
         * so leave this way for now.
         */

        if (mb_map_full)
                return (0);

        nbytes = round_page(nmb * sizeof(union mext_refcnt));
        if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT)) == NULL)
                return (0);
        nmb = nbytes / sizeof(union mext_refcnt);

        for (i = 0; i < nmb; i++) {
                ((union mext_refcnt *)p)->next_ref = mext_refcnt_free;
                mext_refcnt_free = (union mext_refcnt *)p;
                p += sizeof(union mext_refcnt);
                mbstat.m_refree++;
        }
        mbstat.m_refcnt += nmb;

        return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
        register int nmb;
        int how;
{
        register caddr_t p;
        register int i;
        int nbytes;

        /*
         * If we've hit the mbuf limit, stop allocating from mb_map
         * (or trying to) in order to avoid dipping into the section of
         * mb_map which we've "reserved" for clusters.
         */
        if ((nmb + mbstat.m_mbufs) > nmbufs)
                return (0);

        /*
         * Once we run out of map space, it will be impossible to get
         * any more (nothing is ever freed back to the map)
         * -- however you are not dead as m_reclaim might
         * still be able to free a substantial amount of space.
         *
         * XXX Furthermore, we can also work with "recycled" mbufs (when
         * we're calling with M_WAIT the sleep procedure will be woken
         * up when an mbuf is freed. See m_mballoc_wait()).
         */
        if (mb_map_full)
                return (0);

        nbytes = round_page(nmb * MSIZE);
        p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
        if (p == 0 && how == M_WAIT) {
                mbstat.m_wait++;
                p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
        }

        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL)
                return (0);

        nmb = nbytes / MSIZE;
        for (i = 0; i < nmb; i++) {
                ((struct mbuf *)p)->m_next = mmbfree;
                mmbfree = (struct mbuf *)p;
                p += MSIZE;
        }
        mbstat.m_mbufs += nmb;
        mbtypes[MT_FREE] += nmb;
        return (1);
}
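/*
 * Editorial sketch (not in the original source; compiled only under a
 * hypothetical MBUF_EXAMPLES guard): ordinary consumers do not call
 * m_mballoc() directly.  The MGET() macro hands out mbufs from the
 * mmbfree list and only falls back to m_mballoc()/m_retry() -- and,
 * finally, for M_WAIT callers, to m_mballoc_wait() below -- when that
 * list is empty.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_get_mbuf(void)
{
        struct mbuf *m;

        MGET(m, M_DONTWAIT, MT_DATA);   /* may be NULL under memory pressure */
        return (m);
}
#endif /* MBUF_EXAMPLES */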
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
        struct mbuf *p;
        int s;

        s = splimp();
        m_mballoc_wid++;
        if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
                m_mballoc_wid--;
        splx(s);

        /*
         * Now that we (think) we've got something, we will redo an
         * MGET, but avoid getting into another instance of m_mballoc_wait().
         * XXX: We retry to fetch _even_ if the sleep timed out. This is left
         * this way, purposely, in the [unlikely] case that an mbuf was
         * freed but the sleep was not awakened in time.
         */
        p = NULL;
        switch (caller) {
        case MGET_C:
                MGET(p, M_DONTWAIT, type);
                break;
        case MGETHDR_C:
                MGETHDR(p, M_DONTWAIT, type);
                break;
        default:
                panic("m_mballoc_wait: invalid caller (%d)", caller);
        }

        s = splimp();
        if (p != NULL) {                /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mmbfree != NULL)
                        MMBWAKEUP();
        }
        splx(s);
        return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
        int status;

        while (1) {
                tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, M_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }
        }
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
           &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
        register int ncl;
        int how;
{
        register caddr_t p;
        register int i;
        int npg;

        /*
         * If we've hit the mcluster number limit, stop allocating from
         * mb_map (or trying to) in order to avoid dipping into the section
         * of mb_map which we've "reserved" for mbufs.
         */
        if ((ncl + mbstat.m_clusters) > nmbclusters) {
                mbstat.m_drops++;
                return (0);
        }

        /*
         * Once we run out of map space, it will be impossible
         * to get any more (nothing is ever freed back to the
         * map). From this point on, we solely rely on freed
         * mclusters.
         */
        if (mb_map_full) {
                mbstat.m_drops++;
                return (0);
        }

#if MCLBYTES > PAGE_SIZE
        if (how != M_WAIT) {
                i_want_my_mcl += ncl;
                wakeup(&i_want_my_mcl);
                mbstat.m_wait++;
                p = 0;
        } else {
                p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
                                  ~0ul, PAGE_SIZE, 0, mb_map);
        }
#else
        npg = ncl;
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
                                 how != M_WAIT ? M_NOWAIT : M_WAITOK);
        ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL) {
                mbstat.m_drops++;
                return (0);
        }

        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;
        return (1);
}
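/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): the usual way to obtain cluster-backed storage is
 * MGETHDR() followed by MCLGET().  A failed MCLGET() leaves M_EXT
 * clear rather than returning an error, so the flag must be checked.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_get_packet(void)
{
        struct mbuf *m;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {        /* no cluster was available */
                m_freem(m);
                return (NULL);
        }
        return (m);
}
#endif /* MBUF_EXAMPLES */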
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
        caddr_t p;
        int s;

#ifdef __i386__
        /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
        KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
#endif

        /* Sleep until something's available or until we expire. */
        m_clalloc_wid++;
        if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) we've got something, we will redo an
         * _MCLALLOC, but avoid getting into another instance of
         * m_clalloc_wait().
         */
        p = NULL;
        _MCLALLOC(p, M_DONTWAIT);

        s = splimp();
        if (p != NULL) {        /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mclfree != NULL)
                        MCLWAKEUP();
        }

        splx(s);
        return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
#ifdef __i386__
                KASSERT(intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
                m_reclaim();
        }

        /*
         * Both m_mballoc_wait and m_retry must be nulled because
         * when the MGET macro is run from here, we definitely do _not_
         * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
         */
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retry(i, t)   (struct mbuf *)0
        MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
#ifdef __i386__
                KASSERT(intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
                m_reclaim();
        }

#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retryhdr(i, t)        (struct mbuf *)0
        MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}

static void
m_reclaim()
{
        register struct domain *dp;
        register struct protosw *pr;
        int s = splimp();

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
        splx(s);
        mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        return (m);
}

struct mbuf *
m_gethdr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGETHDR(m, how, type);
        return (m);
}

struct mbuf *
m_getclr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

struct mbuf *
m_free(m)
        struct mbuf *m;
{
        register struct mbuf *n;

        MFREE(m, n);
        return (n);
}

void
m_freem(m)
        register struct mbuf *m;
{
        register struct mbuf *n;

        if (m == NULL)
                return;
        do {
                /*
                 * We do need to check non-first mbufs, since some existing
                 * code does not call M_PREPEND properly.
                 * (example: call to bpf_mtap from drivers)
                 */
                if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
                        m_freem(m->m_pkthdr.aux);
                        m->m_pkthdr.aux = NULL;
                }
                MFREE(m, n);
                m = n;
        } while (m);
}
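/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): m_free() releases a single mbuf and returns its
 * successor, so a whole chain can be released with a simple loop;
 * m_freem() above is essentially this loop plus the m_pkthdr.aux
 * handling.
 */
#ifdef MBUF_EXAMPLES
static void
example_free_chain(struct mbuf *m)
{
        while (m != NULL)
                m = m_free(m);  /* m_free() returns the old m->m_next */
}
#endif /* MBUF_EXAMPLES */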
/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
        register struct mbuf *m;
        int len, how;
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR) {
                M_COPY_PKTHDR(mn, m);
                m->m_flags &= ~M_PKTHDR;
        }
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        M_COPY_PKTHDR(n, m);
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                        MEXT_ADD_REF(m);
                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                MCFail++;
        return (top);
nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
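/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): because m_copym() shares clusters by bumping their
 * reference counts, the result is cheap but strictly read-only; use
 * m_dup() (below) when the copy will be written to.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_readonly_copy(struct mbuf *m)
{
        return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif /* MBUF_EXAMPLES */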
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);
        top = n;
        if (!n)
                goto nospace;

        M_COPY_PKTHDR(n, m);
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                n->m_ext = m->m_ext;
                n->m_flags |= M_EXT;
                MEXT_ADD_REF(m);
        } else {
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                MGET(o, how, m->m_type);
                if (!o)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                        MEXT_ADD_REF(m);
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        MCFail++;
        return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
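/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): m_copydata() is the usual way to extract a
 * fixed-size header into aligned local storage when the chain may be
 * fragmented arbitrarily.  The 20-byte size below is an arbitrary
 * illustration; the caller must supply a buffer at least that large.
 */
#ifdef MBUF_EXAMPLES
static void
example_peek_header(struct mbuf *m, void *buf)
{
        m_copydata(m, 0, 20, (caddr_t)buf);
}
#endif /* MBUF_EXAMPLES */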
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (0);
        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);
                if (n == NULL)
                        goto nospace;
                if (top == NULL) {              /* first one, must be PKTHDR */
                        M_COPY_PKTHDR(n, m);
                        nsize = MHLEN;
                } else                          /* not the first one */
                        nsize = MLEN;
                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {
                                (void)m_free(n);
                                goto nospace;
                        }
                        nsize = MCLBYTES;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __FUNCTION__));
        }
        return (top);

nospace:
        m_freem(top);
        MCFail++;
        return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
        register struct mbuf *m, *n;
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}
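/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): contrast with m_copypacket() -- m_dup() pays for a
 * full data copy, including cluster contents, and in return yields a
 * chain the caller may modify freely.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
        return (m_dup(m, M_DONTWAIT));  /* NULL if allocation fails */
}
#endif /* MBUF_EXAMPLES */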
void
m_adj(mp, req_len)
        struct mbuf *mp;
        int req_len;
{
        register int len = req_len;
        register struct mbuf *m;
        register int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        register int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, M_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR) {
                        M_COPY_PKTHDR(m, n);
                        n->m_flags &= ~M_PKTHDR;
                }
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        MPFail++;
        return (0);
}
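/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): the canonical protocol-input pattern around
 * m_pullup().  Note that m_pullup() frees the chain itself on failure,
 * so the caller must not free it again.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_ensure_contiguous(struct mbuf *m, int hdrlen)
{
        if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
                return (NULL);          /* chain was already freed */
        /* mtod(m, caddr_t) now points at hdrlen contiguous bytes. */
        return (m);
}
#endif /* MBUF_EXAMPLES */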
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else
                                return (n);
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                MEXT_ADD_REF(m);
                m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
                n->m_data = m->m_data + len;
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
        char *buf;
        int totlen, off0;
        struct ifnet *ifp;
        void (*copy) __P((char *from, caddr_t to, u_int len));
{
        register struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        register int off = off0, len;
        register char *cp;
        char *epkt;

        cp = buf;
        epkt = cp + totlen;
        if (off) {
                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
        }
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == 0)
                return (0);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                return (0);
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);
                        else
                                len = m->m_len;
                } else {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;
                                m->m_len = len;
                        } else
                                len = m->m_len;
                }
                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = buf;
        }
        return (top);
}
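/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): m_split() detaches everything past the first
 * reclen bytes, e.g. to peel one record off the front of a chain.
 */
#ifdef MBUF_EXAMPLES
static struct mbuf *
example_split_record(struct mbuf *m, int reclen)
{
        struct mbuf *tail;

        tail = m_split(m, reclen, M_DONTWAIT);  /* NULL on failure */
        /* On success, m holds the first reclen bytes, tail the rest. */
        return (tail);
}
#endif /* MBUF_EXAMPLES */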
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
        struct mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register int mlen;
        register struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;
        m2 = m;
        while (len) {
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        return;
}
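/*
 * Editorial sketch (hypothetical MBUF_EXAMPLES guard, not in the
 * original source): m_adj() with a negative count trims from the tail,
 * which combines naturally with m_print() when debugging, e.g.
 * stripping a trailer and hex-dumping what remains.  m_print() expects
 * a chain with a valid packet header.
 */
#ifdef MBUF_EXAMPLES
static void
example_strip_trailer(struct mbuf *m, int trailerlen)
{
        m_adj(m, -trailerlen);  /* negative request trims from the tail */
        if (m->m_flags & M_PKTHDR)
                m_print(m);     /* hex-dump each mbuf in the chain */
}
#endif /* MBUF_EXAMPLES */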