uipc_mbuf.c revision 254520
/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 254520 2013-08-19 11:16:53Z andre $");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * m_get2() allocates the minimum mbuf that would fit "size" bytes.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}
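
/*
 * Example use of m_get2() (a minimal sketch; "framelen" is a hypothetical
 * variable): callers that need a contiguous buffer of a size only known
 * at run time can let m_get2() pick the smallest fitting allocation.
 * Sizes above MJUMPAGESIZE yield NULL, so the error path below covers
 * both allocation failure and oversized requests.
 *
 *	struct mbuf *m;
 *
 *	m = m_get2(framelen, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = framelen;
 */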

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
		    ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1, arg2
 *           Two arguments (either may be NULL) to be passed to the
 *           provided freef routine.
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    0 on success, or ENOMEM if the reference counter for the buffer
 *    could not be allocated (in which case M_EXT is not set).
 */
int
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *arg1, void *arg2, int flags, int type,
    int wait)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	if (type != EXT_EXTREF)
		mb->m_ext.ref_cnt = uma_zalloc(zone_ext_refcnt, wait);

	if (mb->m_ext.ref_cnt == NULL)
		return (ENOMEM);

	*(mb->m_ext.ref_cnt) = 1;
	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	return (0);
}
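
/*
 * Example use of m_extadd() (a minimal sketch; mydrv_ext_free(),
 * mydrv_buf_release(), "buf" and "bufsize" are hypothetical driver
 * names): attach a driver-owned buffer so the stack releases it through
 * the supplied routine once the last reference goes away.
 *
 *	static void
 *	mydrv_ext_free(void *arg1, void *arg2)
 *	{
 *
 *		mydrv_buf_release(arg1);
 *	}
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	if (m_extadd(m, buf, bufsize, mydrv_ext_free, buf, NULL, 0,
 *	    EXT_NET_DRV, M_NOWAIT) != 0) {
 *		m_free(m);
 *		return (ENOBUFS);
 *	}
 */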

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*(m->m_ext.ref_cnt) == 1 ||
	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			if (*(m->m_ext.ref_cnt) == 0)
				*(m->m_ext.ref_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			*(m->m_ext.ref_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
			    m->m_ext.ref_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m->m_ext.ext_arg1,
			    m->m_ext.ext_arg2);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	/*
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_arg2 = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
	uma_zfree(zone_mbuf, m);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_arg1 = m->m_ext.ext_arg1;
	n->m_ext.ext_arg2 = m->m_ext.ext_arg2;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		if (m->m_flags & M_PKTHDR) {
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
		}
		if (m != m0 && m->m_nextpkt != NULL) {
			KASSERT(m->m_nextpkt == NULL,
			    ("%s: m_nextpkt not NULL", __func__));
			m_freem(m->m_nextpkt);
			m->m_nextpkt = NULL;
		}
		m->m_flags = m->m_flags & (M_EXT|M_RDONLY);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 or panics when bad and 1 on all tests passed.
 * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
 * blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
		    ((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
		    (caddr_t)(&m->m_dat)));
		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
		    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
			if ((caddr_t)m->m_pkthdr.header < a ||
			    (caddr_t)m->m_pkthdr.header > b)
				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
		}

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}
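
/*
 * Example use of m_sanity() (a sketch): it is cheap enough to wrap in an
 * assertion while debugging code that rearranges chains.
 *
 *	KASSERT(m_sanity(m0, 0), ("%s: mbuf chain fails sanity", __func__));
 */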

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}
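
/*
 * Example (a sketch; "struct myproto_hdr" is hypothetical): most callers
 * reach m_prepend() through the M_PREPEND() macro, which uses leading
 * space in the first mbuf when available and falls back to m_prepend()
 * only when it is not.  On failure the macro leaves the mbuf pointer
 * NULL with the chain already freed.
 *
 *	M_PREPEND(m, sizeof(struct myproto_hdr), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */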

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
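
/*
 * Example use of m_copym() (a sketch): a retransmission path copies a
 * region of queued data without duplicating cluster contents.  Because
 * clusters are shared, the result must be treated as read-only; use
 * m_dup() instead when a writable copy is required.
 *
 *	n = m_copym(m, off, len, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */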

/*
 * Returns mbuf chain with new head for the prepending case.
 * Copies from mbuf (chain) n from off for len to mbuf (chain) m
 * either prepending or appending the data.
 * The resulting mbuf (chain) m is fully writeable.
 * m is destination (is made writeable)
 * n is source, off is offset in source, len is len from offset
 * dir, 0 append, 1 prepend
 * how, wait or nowait
 */

static int
m_bcopyxxx(void *s, void *t, u_int len)
{
	bcopy(s, t, (size_t)len);
	return 0;
}

struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
	struct mbuf *mm, *x, *z, *prev = NULL;
	caddr_t p;
	int i, nlen = 0;
	caddr_t buf[MLEN];

	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

	mm = m;
	if (!prep) {
		while (mm->m_next) {
			prev = mm;
			mm = mm->m_next;
		}
	}
	for (z = n; z != NULL; z = z->m_next)
		nlen += z->m_len;
	if (len == M_COPYALL)
		len = nlen - off;
	if (off + len > nlen || len < 1)
		return NULL;

	if (!M_WRITABLE(mm)) {
		/* XXX: Use proper m_xxx function instead. */
		x = m_getcl(how, MT_DATA, mm->m_flags);
		if (x == NULL)
			return NULL;
		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
		x->m_data = p;
		mm->m_next = NULL;
		if (mm != m)
			prev->m_next = x;
		m_free(mm);
		mm = x;
	}

	/*
	 * Append/prepend the data.  Allocating mbufs as necessary.
	 */
	/* Shortcut if enough free space in first/last mbuf. */
	if (!prep && M_TRAILINGSPACE(mm) >= len) {
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
		    mm->m_len);
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return m;
	}
	if (prep && M_LEADINGSPACE(mm) >= len) {
		mm->m_data = mtod(mm, caddr_t) - len;
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return mm;
	}

	/* Expand first/last mbuf to cluster if possible. */
	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
		mm->m_data = mm->m_ext.ext_buf;
		mm->m_pkthdr.header = NULL;
	}
	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
		    mm->m_ext.ext_size - mm->m_len, mm->m_len);
		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
		    mm->m_ext.ext_size - mm->m_len;
		mm->m_pkthdr.header = NULL;
	}

	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
	if (!prep && len > M_TRAILINGSPACE(mm)) {
		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
			return NULL;
	}
	if (prep && len > M_LEADINGSPACE(mm)) {
		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
			return NULL;
		i = 0;
		for (x = z; x != NULL; x = x->m_next) {
			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
			    (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
			if (!x->m_next)
				break;
		}
		z->m_data += i - len;
		m_move_pkthdr(mm, z);
		x->m_next = mm;
		mm = z;
	}

	/* Seek to start position in source mbuf. Optimization for long chains. */
	while (off > 0) {
		if (off < n->m_len)
			break;
		off -= n->m_len;
		n = n->m_next;
	}

	/* Copy data into target mbuf. */
	z = mm;
	while (len > 0) {
		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
		i = M_TRAILINGSPACE(z);
		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
		z->m_len += i;
		/* fixup pkthdr.len if necessary */
		if ((prep ? mm : m)->m_flags & M_PKTHDR)
			(prep ? mm : m)->m_pkthdr.len += i;
		off += i;
		len -= i;
		z = z->m_next;
	}
	return (prep ? mm : m);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
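
/*
 * Example use of m_copydata() (a sketch; "struct myproto_hdr" is
 * hypothetical): linearize a header that may span mbufs into a stack
 * buffer before parsing it.
 *
 *	struct myproto_hdr hdr;
 *
 *	if (m->m_pkthdr.len < off + sizeof(hdr))
 *		return (EINVAL);
 *	m_copydata(m, off, sizeof(hdr), (caddr_t)&hdr);
 */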

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
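
/*
 * Example use of m_adj() (a sketch): strip an Ethernet header from the
 * front of a packet and a trailing frame checksum from the end.  A
 * positive length trims from the head, a negative one from the tail.
 *
 *	m_adj(m, ETHER_HDR_LEN);
 *	m_adj(m, -ETHER_CRC_LEN);
 */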

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
 bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}
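
/*
 * Example use of m_pullup() (a sketch): the classic input-path pattern
 * for making a header contiguous before casting.  Note that m_pullup()
 * frees the chain on failure, so the caller must not touch it again.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */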

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
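
/*
 * Example use of m_split() (a sketch; "mtu" is hypothetical): cut a
 * packet in two for fragmentation.  On success "m" keeps the first mtu
 * bytes and the returned chain holds the remainder; on failure "m" is
 * left intact.
 *
 *	n = m_split(m, mtu, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */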

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
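
/*
 * Example use of m_copyback() (a sketch; "sumoff" and "computed_sum" are
 * hypothetical): overwrite a field in place; the chain is extended with
 * zeroed mbufs if the region lies past its current end.
 *
 *	uint16_t sum;
 *
 *	sum = htons(computed_sum);
 *	m_copyback(m, sumoff, sizeof(sum), (c_caddr_t)&sum);
 */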

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
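
/*
 * Example use of m_apply() (a sketch; mydigest_update(), mydigest_add()
 * and struct mydigest are hypothetical): visit every contiguous region
 * once, e.g. to feed an incremental checksum or digest without
 * linearizing the chain.
 *
 *	static int
 *	mydigest_update(void *arg, void *data, u_int len)
 *	{
 *
 *		return (mydigest_add((struct mydigest *)arg, data, len));
 *	}
 *
 *	error = m_apply(m, off, len, mydigest_update, &ctx);
 */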
"" : "\n"); 1491 if (pdata) 1492 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-"); 1493 if (len != -1) 1494 len -= m2->m_len; 1495 m2 = m2->m_next; 1496 } 1497 if (len > 0) 1498 printf("%d bytes unaccounted for.\n", len); 1499 return; 1500} 1501 1502u_int 1503m_fixhdr(struct mbuf *m0) 1504{ 1505 u_int len; 1506 1507 len = m_length(m0, NULL); 1508 m0->m_pkthdr.len = len; 1509 return (len); 1510} 1511 1512u_int 1513m_length(struct mbuf *m0, struct mbuf **last) 1514{ 1515 struct mbuf *m; 1516 u_int len; 1517 1518 len = 0; 1519 for (m = m0; m != NULL; m = m->m_next) { 1520 len += m->m_len; 1521 if (m->m_next == NULL) 1522 break; 1523 } 1524 if (last != NULL) 1525 *last = m; 1526 return (len); 1527} 1528 1529/* 1530 * Defragment a mbuf chain, returning the shortest possible 1531 * chain of mbufs and clusters. If allocation fails and 1532 * this cannot be completed, NULL will be returned, but 1533 * the passed in chain will be unchanged. Upon success, 1534 * the original chain will be freed, and the new chain 1535 * will be returned. 1536 * 1537 * If a non-packet header is passed in, the original 1538 * mbuf (chain?) will be returned unharmed. 1539 */ 1540struct mbuf * 1541m_defrag(struct mbuf *m0, int how) 1542{ 1543 struct mbuf *m_new = NULL, *m_final = NULL; 1544 int progress = 0, length; 1545 1546 MBUF_CHECKSLEEP(how); 1547 if (!(m0->m_flags & M_PKTHDR)) 1548 return (m0); 1549 1550 m_fixhdr(m0); /* Needed sanity check */ 1551 1552#ifdef MBUF_STRESS_TEST 1553 if (m_defragrandomfailures) { 1554 int temp = arc4random() & 0xff; 1555 if (temp == 0xba) 1556 goto nospace; 1557 } 1558#endif 1559 1560 if (m0->m_pkthdr.len > MHLEN) 1561 m_final = m_getcl(how, MT_DATA, M_PKTHDR); 1562 else 1563 m_final = m_gethdr(how, MT_DATA); 1564 1565 if (m_final == NULL) 1566 goto nospace; 1567 1568 if (m_dup_pkthdr(m_final, m0, how) == 0) 1569 goto nospace; 1570 1571 m_new = m_final; 1572 1573 while (progress < m0->m_pkthdr.len) { 1574 length = m0->m_pkthdr.len - progress; 1575 if (length > MCLBYTES) 1576 length = MCLBYTES; 1577 1578 if (m_new == NULL) { 1579 if (length > MLEN) 1580 m_new = m_getcl(how, MT_DATA, 0); 1581 else 1582 m_new = m_get(how, MT_DATA); 1583 if (m_new == NULL) 1584 goto nospace; 1585 } 1586 1587 m_copydata(m0, progress, length, mtod(m_new, caddr_t)); 1588 progress += length; 1589 m_new->m_len = length; 1590 if (m_new != m_final) 1591 m_cat(m_final, m_new); 1592 m_new = NULL; 1593 } 1594#ifdef MBUF_STRESS_TEST 1595 if (m0->m_next == NULL) 1596 m_defraguseless++; 1597#endif 1598 m_freem(m0); 1599 m0 = m_final; 1600#ifdef MBUF_STRESS_TEST 1601 m_defragpackets++; 1602 m_defragbytes += m0->m_pkthdr.len; 1603#endif 1604 return (m0); 1605nospace: 1606#ifdef MBUF_STRESS_TEST 1607 m_defragfailure++; 1608#endif 1609 if (m_final) 1610 m_freem(m_final); 1611 return (NULL); 1612} 1613 1614/* 1615 * Defragment an mbuf chain, returning at most maxfrags separate 1616 * mbufs+clusters. If this is not possible NULL is returned and 1617 * the original mbuf chain is left in it's present (potentially 1618 * modified) state. We use two techniques: collapsing consecutive 1619 * mbufs and replacing consecutive mbufs by a cluster. 1620 * 1621 * NB: this should really be named m_defrag but that name is taken 1622 */ 1623struct mbuf * 1624m_collapse(struct mbuf *m0, int how, int maxfrags) 1625{ 1626 struct mbuf *m, *n, *n2, **prev; 1627 u_int curfrags; 1628 1629 /* 1630 * Calculate the current number of frags. 

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}
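
/*
 * Example use of m_collapse() (a sketch): bound a chain to a DMA engine
 * that accepts at most, say, four scatter/gather segments.  Unlike
 * m_defrag(), a failed m_collapse() may leave the chain modified,
 * though still valid and still owned by the caller.
 *
 *	n = m_collapse(m, M_NOWAIT, 4);
 *	if (n == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = n;
 */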

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
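
/*
 * Example use of m_uiotombuf() (a sketch): a send path turning user I/O
 * into a packet, reserving room in front for a link-layer header so a
 * later M_PREPEND() need not allocate.
 *
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */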

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
#ifdef INVARIANTS
	const char *msg = "%s: not a virgin mbuf";
#endif
	int adjust;

	if (m->m_flags & M_EXT) {
		KASSERT(m->m_data == m->m_ext.ext_buf, (msg, __func__));
		adjust = m->m_ext.ext_size - len;
	} else if (m->m_flags & M_PKTHDR) {
		KASSERT(m->m_data == m->m_pktdat, (msg, __func__));
		adjust = MHLEN - len;
	} else {
		KASSERT(m->m_data == m->m_dat, (msg, __func__));
		adjust = MLEN - len;
	}

	m->m_data += adjust &~ (sizeof(long)-1);
}
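
/*
 * Example use of m_align() (a sketch; "replylen" is hypothetical): place
 * a payload of known size at the end of a fresh mbuf so that headers
 * prepended later fit without another allocation.
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m_align(m, replylen);
 *	m->m_len = m->m_pkthdr.len = replylen;
 */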

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
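
/*
 * Example use of m_unshare() (a sketch): IPsec-style callers obtain a
 * chain that is safe to modify in place before encrypting it.  On
 * failure the original chain has already been freed.
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */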

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow.*/
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
#endif