 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

/* Worst-case header sizes; exported read-write through sysctl below. */
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	/*
	 * Allocate the first mbuf; attach a cluster when more than
	 * MINCLSIZE bytes are still wanted, since a cluster fits it better.
	 */
	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	/*
	 * Find the tail of the caller's chain (if any) so the newly
	 * allocated chain can be appended once it is complete.
	 */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	/* Only now link onto the caller's chain, so failure never touches it. */
	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	/* Free only what we allocated here; the caller's chain is intact. */
	m_freem(top);
	return (NULL);
}

void
m_freem(struct mbuf *m)
{
| 35 */ 36 37#include "opt_param.h" 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/kernel.h> 41#include <sys/lock.h> 42#include <sys/malloc.h> 43#include <sys/mbuf.h> 44#include <sys/sysctl.h> 45#include <sys/domain.h> 46#include <sys/protosw.h> 47 48int max_linkhdr; 49int max_protohdr; 50int max_hdr; 51int max_datalen; 52 53/* 54 * sysctl(8) exported objects 55 */ 56SYSCTL_DECL(_kern_ipc); 57SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW, 58 &max_linkhdr, 0, ""); 59SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW, 60 &max_protohdr, 0, ""); 61SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, ""); 62SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW, 63 &max_datalen, 0, ""); 64 65/* 66 * struct mbuf * 67 * m_getm(m, len, how, type) 68 * 69 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits 70 * best) and return a pointer to the top of the allocated chain. If m is 71 * non-null, then we assume that it is a single mbuf or an mbuf chain to 72 * which we want len bytes worth of mbufs and/or clusters attached, and so 73 * if we succeed in allocating it, we will just return a pointer to m. 74 * 75 * If we happen to fail at any point during the allocation, we will free 76 * up everything we have already allocated and return NULL. 
77 * 78 */ 79struct mbuf * 80m_getm(struct mbuf *m, int len, int how, int type) 81{ 82 struct mbuf *top, *tail, *mp, *mtail = NULL; 83 84 KASSERT(len >= 0, ("len is < 0 in m_getm")); 85 86 MGET(mp, how, type); 87 if (mp == NULL) 88 return (NULL); 89 else if (len > MINCLSIZE) { 90 MCLGET(mp, how); 91 if ((mp->m_flags & M_EXT) == 0) { 92 m_free(mp); 93 return (NULL); 94 } 95 } 96 mp->m_len = 0; 97 len -= M_TRAILINGSPACE(mp); 98 99 if (m != NULL) 100 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next); 101 else 102 m = mp; 103 104 top = tail = mp; 105 while (len > 0) { 106 MGET(mp, how, type); 107 if (mp == NULL) 108 goto failed; 109 110 tail->m_next = mp; 111 tail = mp; 112 if (len > MINCLSIZE) { 113 MCLGET(mp, how); 114 if ((mp->m_flags & M_EXT) == 0) 115 goto failed; 116 } 117 118 mp->m_len = 0; 119 len -= M_TRAILINGSPACE(mp); 120 } 121 122 if (mtail != NULL) 123 mtail->m_next = top; 124 return (m); 125 126failed: 127 m_freem(top); 128 return (NULL); 129} 130 131void 132m_freem(struct mbuf *m) 133{
|
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		/* NB: the entire original chain is freed on failure. */
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Transfer the packet header to the new head mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip over the leading "off" bytes of the source chain. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster read-only: bump its refcount. */
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		/* Preserve the original head mbuf's leading offset. */
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	/* Locate the mbuf containing the starting offset. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			/* Worth a cluster; nsize becomes the cluster size. */
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	/* Compaction is only attempted into the tail mbuf of m's chain. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes of data from the mbuf chain: from the head when
 * req_len is positive, from the tail when it is negative.  Any packet
 * header length is adjusted to match.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of trailing mbufs past the new end. */
		while (m->m_next)
			(m = m->m_next) ->m_len = 0;
	}
}

/*
 * Rearange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header into the new head mbuf. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Pull in extra bytes (up to max_protohdr) if they fit. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf in which the split point falls. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split point falls exactly on an mbuf boundary. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Both halves share the cluster via the refcount. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs in the chain carry no pkthdr. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			/* On cluster failure fall back to the plain mbuf. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(buf, mtod(m, caddr_t), (unsigned)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	/* Walk (and, if needed, grow) the chain until "off" is reached. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			/* Extend with a zeroed mbuf so any gap reads as 0. */
			n = m_get_clrd(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			/* Allocation failure here silently truncates. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Debugging aid: dump each mbuf of a chain to the console.
 * NOTE(review): reads m->m_pkthdr.len, so m is assumed to have a valid
 * packet header, and the chain is assumed consistent (m_pkthdr.len equal
 * to the sum of m_len) -- an inconsistent chain would walk past a NULL
 * m_next here; confirm callers guarantee this.
 */
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
| 137} 138 139/* 140 * Lesser-used path for M_PREPEND: 141 * allocate new mbuf to prepend to chain, 142 * copy junk along. 143 */ 144struct mbuf * 145m_prepend(struct mbuf *m, int len, int how) 146{ 147 struct mbuf *mn; 148 149 MGET(mn, how, m->m_type); 150 if (mn == NULL) { 151 m_freem(m); 152 return (NULL); 153 } 154 if (m->m_flags & M_PKTHDR) { 155 M_COPY_PKTHDR(mn, m); 156 m->m_flags &= ~M_PKTHDR; 157 } 158 mn->m_next = m; 159 m = mn; 160 if (len < MHLEN) 161 MH_ALIGN(m, len); 162 m->m_len = len; 163 return (m); 164} 165 166/* 167 * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 168 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 169 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller. 170 * Note that the copy is read-only, because clusters are not copied, 171 * only their reference counts are incremented. 172 */ 173struct mbuf * 174m_copym(struct mbuf *m, int off0, int len, int wait) 175{ 176 struct mbuf *n, **np; 177 int off = off0; 178 struct mbuf *top; 179 int copyhdr = 0; 180 181 KASSERT(off >= 0, ("m_copym, negative off %d", off)); 182 KASSERT(len >= 0, ("m_copym, negative len %d", len)); 183 if (off == 0 && m->m_flags & M_PKTHDR) 184 copyhdr = 1; 185 while (off > 0) { 186 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain")); 187 if (off < m->m_len) 188 break; 189 off -= m->m_len; 190 m = m->m_next; 191 } 192 np = ⊤ 193 top = 0; 194 while (len > 0) { 195 if (m == NULL) { 196 KASSERT(len == M_COPYALL, 197 ("m_copym, length > size of mbuf chain")); 198 break; 199 } 200 MGET(n, wait, m->m_type); 201 *np = n; 202 if (n == NULL) 203 goto nospace; 204 if (copyhdr) { 205 M_COPY_PKTHDR(n, m); 206 if (len == M_COPYALL) 207 n->m_pkthdr.len -= off0; 208 else 209 n->m_pkthdr.len = len; 210 copyhdr = 0; 211 } 212 n->m_len = min(len, m->m_len - off); 213 if (m->m_flags & M_EXT) { 214 n->m_data = m->m_data + off; 215 n->m_ext = m->m_ext; 216 n->m_flags |= M_EXT; 217 MEXT_ADD_REF(m); 218 } else 219 
bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 220 (unsigned)n->m_len); 221 if (len != M_COPYALL) 222 len -= n->m_len; 223 off = 0; 224 m = m->m_next; 225 np = &n->m_next; 226 } 227 if (top == NULL) 228 mbstat.m_mcfail++; /* XXX: No consistency. */ 229 230 return (top); 231nospace: 232 m_freem(top); 233 mbstat.m_mcfail++; /* XXX: No consistency. */ 234 return (NULL); 235} 236 237/* 238 * Copy an entire packet, including header (which must be present). 239 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'. 240 * Note that the copy is read-only, because clusters are not copied, 241 * only their reference counts are incremented. 242 * Preserve alignment of the first mbuf so if the creator has left 243 * some room at the beginning (e.g. for inserting protocol headers) 244 * the copies still have the room available. 245 */ 246struct mbuf * 247m_copypacket(struct mbuf *m, int how) 248{ 249 struct mbuf *top, *n, *o; 250 251 MGET(n, how, m->m_type); 252 top = n; 253 if (n == NULL) 254 goto nospace; 255 256 M_COPY_PKTHDR(n, m); 257 n->m_len = m->m_len; 258 if (m->m_flags & M_EXT) { 259 n->m_data = m->m_data; 260 n->m_ext = m->m_ext; 261 n->m_flags |= M_EXT; 262 MEXT_ADD_REF(m); 263 } else { 264 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat ); 265 bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 266 } 267 268 m = m->m_next; 269 while (m) { 270 MGET(o, how, m->m_type); 271 if (o == NULL) 272 goto nospace; 273 274 n->m_next = o; 275 n = n->m_next; 276 277 n->m_len = m->m_len; 278 if (m->m_flags & M_EXT) { 279 n->m_data = m->m_data; 280 n->m_ext = m->m_ext; 281 n->m_flags |= M_EXT; 282 MEXT_ADD_REF(m); 283 } else { 284 bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 285 } 286 287 m = m->m_next; 288 } 289 return top; 290nospace: 291 m_freem(top); 292 mbstat.m_mcfail++; /* XXX: No consistency. 
*/ 293 return (NULL); 294} 295 296/* 297 * Copy data from an mbuf chain starting "off" bytes from the beginning, 298 * continuing for "len" bytes, into the indicated buffer. 299 */ 300void 301m_copydata(const struct mbuf *m, int off, int len, caddr_t cp) 302{ 303 unsigned count; 304 305 KASSERT(off >= 0, ("m_copydata, negative off %d", off)); 306 KASSERT(len >= 0, ("m_copydata, negative len %d", len)); 307 while (off > 0) { 308 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain")); 309 if (off < m->m_len) 310 break; 311 off -= m->m_len; 312 m = m->m_next; 313 } 314 while (len > 0) { 315 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain")); 316 count = min(m->m_len - off, len); 317 bcopy(mtod(m, caddr_t) + off, cp, count); 318 len -= count; 319 cp += count; 320 off = 0; 321 m = m->m_next; 322 } 323} 324 325/* 326 * Copy a packet header mbuf chain into a completely new chain, including 327 * copying any mbuf clusters. Use this instead of m_copypacket() when 328 * you need a writable copy of an mbuf chain. 
329 */ 330struct mbuf * 331m_dup(struct mbuf *m, int how) 332{ 333 struct mbuf **p, *top = NULL; 334 int remain, moff, nsize; 335 336 /* Sanity check */ 337 if (m == NULL) 338 return (NULL); 339 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__)); 340 341 /* While there's more data, get a new mbuf, tack it on, and fill it */ 342 remain = m->m_pkthdr.len; 343 moff = 0; 344 p = ⊤ 345 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */ 346 struct mbuf *n; 347 348 /* Get the next new mbuf */ 349 MGET(n, how, m->m_type); 350 if (n == NULL) 351 goto nospace; 352 if (top == NULL) { /* first one, must be PKTHDR */ 353 M_COPY_PKTHDR(n, m); 354 nsize = MHLEN; 355 } else /* not the first one */ 356 nsize = MLEN; 357 if (remain >= MINCLSIZE) { 358 MCLGET(n, how); 359 if ((n->m_flags & M_EXT) == 0) { 360 (void)m_free(n); 361 goto nospace; 362 } 363 nsize = MCLBYTES; 364 } 365 n->m_len = 0; 366 367 /* Link it into the new chain */ 368 *p = n; 369 p = &n->m_next; 370 371 /* Copy data from original mbuf(s) into new mbuf */ 372 while (n->m_len < nsize && m != NULL) { 373 int chunk = min(nsize - n->m_len, m->m_len - moff); 374 375 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk); 376 moff += chunk; 377 n->m_len += chunk; 378 remain -= chunk; 379 if (moff == m->m_len) { 380 m = m->m_next; 381 moff = 0; 382 } 383 } 384 385 /* Check correct total mbuf length */ 386 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL), 387 ("%s: bogus m_pkthdr.len", __func__)); 388 } 389 return (top); 390 391nospace: 392 m_freem(top); 393 mbstat.m_mcfail++; /* XXX: No consistency. */ 394 return (NULL); 395} 396 397/* 398 * Concatenate mbuf chain n to m. 399 * Both chains must be of the same type (e.g. MT_DATA). 400 * Any m_pkthdr is not updated. 
401 */ 402void 403m_cat(struct mbuf *m, struct mbuf *n) 404{ 405 while (m->m_next) 406 m = m->m_next; 407 while (n) { 408 if (m->m_flags & M_EXT || 409 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { 410 /* just join the two chains */ 411 m->m_next = n; 412 return; 413 } 414 /* splat the data from one into the other */ 415 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 416 (u_int)n->m_len); 417 m->m_len += n->m_len; 418 n = m_free(n); 419 } 420} 421 422void 423m_adj(struct mbuf *mp, int req_len) 424{ 425 int len = req_len; 426 struct mbuf *m; 427 int count; 428 429 if ((m = mp) == NULL) 430 return; 431 if (len >= 0) { 432 /* 433 * Trim from head. 434 */ 435 while (m != NULL && len > 0) { 436 if (m->m_len <= len) { 437 len -= m->m_len; 438 m->m_len = 0; 439 m = m->m_next; 440 } else { 441 m->m_len -= len; 442 m->m_data += len; 443 len = 0; 444 } 445 } 446 m = mp; 447 if (mp->m_flags & M_PKTHDR) 448 m->m_pkthdr.len -= (req_len - len); 449 } else { 450 /* 451 * Trim from tail. Scan the mbuf chain, 452 * calculating its length and finding the last mbuf. 453 * If the adjustment only affects this mbuf, then just 454 * adjust and return. Otherwise, rescan and truncate 455 * after the remaining size. 456 */ 457 len = -len; 458 count = 0; 459 for (;;) { 460 count += m->m_len; 461 if (m->m_next == (struct mbuf *)0) 462 break; 463 m = m->m_next; 464 } 465 if (m->m_len >= len) { 466 m->m_len -= len; 467 if (mp->m_flags & M_PKTHDR) 468 mp->m_pkthdr.len -= len; 469 return; 470 } 471 count -= len; 472 if (count < 0) 473 count = 0; 474 /* 475 * Correct length for chain is "count". 476 * Find the mbuf with last data, adjust its length, 477 * and toss data from remaining mbufs on chain. 
478 */ 479 m = mp; 480 if (m->m_flags & M_PKTHDR) 481 m->m_pkthdr.len = count; 482 for (; m; m = m->m_next) { 483 if (m->m_len >= count) { 484 m->m_len = count; 485 break; 486 } 487 count -= m->m_len; 488 } 489 while (m->m_next) 490 (m = m->m_next) ->m_len = 0; 491 } 492} 493 494/* 495 * Rearange an mbuf chain so that len bytes are contiguous 496 * and in the data area of an mbuf (so that mtod and dtom 497 * will work for a structure of size len). Returns the resulting 498 * mbuf chain on success, frees it and returns null on failure. 499 * If there is room, it will add up to max_protohdr-len extra bytes to the 500 * contiguous region in an attempt to avoid being called next time. 501 */ 502struct mbuf * 503m_pullup(struct mbuf *n, int len) 504{ 505 struct mbuf *m; 506 int count; 507 int space; 508 509 /* 510 * If first mbuf has no cluster, and has room for len bytes 511 * without shifting current data, pullup into it, 512 * otherwise allocate a new mbuf to prepend to the chain. 513 */ 514 if ((n->m_flags & M_EXT) == 0 && 515 n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 516 if (n->m_len >= len) 517 return (n); 518 m = n; 519 n = n->m_next; 520 len -= m->m_len; 521 } else { 522 if (len > MHLEN) 523 goto bad; 524 MGET(m, M_DONTWAIT, n->m_type); 525 if (m == NULL) 526 goto bad; 527 m->m_len = 0; 528 if (n->m_flags & M_PKTHDR) { 529 M_COPY_PKTHDR(m, n); 530 n->m_flags &= ~M_PKTHDR; 531 } 532 } 533 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 534 do { 535 count = min(min(max(len, max_protohdr), space), n->m_len); 536 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 537 (unsigned)count); 538 len -= count; 539 m->m_len += count; 540 n->m_len -= count; 541 space -= count; 542 if (n->m_len) 543 n->m_data += count; 544 else 545 n = m_free(n); 546 } while (len > 0 && n); 547 if (len > 0) { 548 (void) m_free(m); 549 goto bad; 550 } 551 m->m_next = n; 552 return (m); 553bad: 554 m_freem(n); 555 mbstat.m_mpfail++; /* XXX: No consistency. 
*/ 556 return (NULL); 557} 558 559/* 560 * Partition an mbuf chain in two pieces, returning the tail -- 561 * all but the first len0 bytes. In case of failure, it returns NULL and 562 * attempts to restore the chain to its original state. 563 */ 564struct mbuf * 565m_split(struct mbuf *m0, int len0, int wait) 566{ 567 struct mbuf *m, *n; 568 unsigned len = len0, remain; 569 570 for (m = m0; m && len > m->m_len; m = m->m_next) 571 len -= m->m_len; 572 if (m == NULL) 573 return (NULL); 574 remain = m->m_len - len; 575 if (m0->m_flags & M_PKTHDR) { 576 MGETHDR(n, wait, m0->m_type); 577 if (n == NULL) 578 return (NULL); 579 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 580 n->m_pkthdr.len = m0->m_pkthdr.len - len0; 581 m0->m_pkthdr.len = len0; 582 if (m->m_flags & M_EXT) 583 goto extpacket; 584 if (remain > MHLEN) { 585 /* m can't be the lead packet */ 586 MH_ALIGN(n, 0); 587 n->m_next = m_split(m, len, wait); 588 if (n->m_next == NULL) { 589 (void) m_free(n); 590 return (NULL); 591 } else 592 return (n); 593 } else 594 MH_ALIGN(n, remain); 595 } else if (remain == 0) { 596 n = m->m_next; 597 m->m_next = NULL; 598 return (n); 599 } else { 600 MGET(n, wait, m->m_type); 601 if (n == NULL) 602 return (NULL); 603 M_ALIGN(n, remain); 604 } 605extpacket: 606 if (m->m_flags & M_EXT) { 607 n->m_flags |= M_EXT; 608 n->m_ext = m->m_ext; 609 MEXT_ADD_REF(m); 610 m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */ 611 n->m_data = m->m_data + len; 612 } else { 613 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 614 } 615 n->m_len = remain; 616 m->m_len = len; 617 n->m_next = m->m_next; 618 m->m_next = NULL; 619 return (n); 620} 621/* 622 * Routine to copy from device local memory into mbufs. 623 * Note that `off' argument is offset into first mbuf of target chain from 624 * which to begin copying the data to. 
625 */ 626struct mbuf * 627m_devget(char *buf, int totlen, int off, struct ifnet *ifp, 628 void (*copy)(char *from, caddr_t to, u_int len)) 629{ 630 struct mbuf *m; 631 struct mbuf *top = 0, **mp = ⊤ 632 int len; 633 634 if (off < 0 || off > MHLEN) 635 return (NULL); 636 637 MGETHDR(m, M_DONTWAIT, MT_DATA); 638 if (m == NULL) 639 return (NULL); 640 m->m_pkthdr.rcvif = ifp; 641 m->m_pkthdr.len = totlen; 642 len = MHLEN; 643 644 while (totlen > 0) { 645 if (top) { 646 MGET(m, M_DONTWAIT, MT_DATA); 647 if (m == NULL) { 648 m_freem(top); 649 return (NULL); 650 } 651 len = MLEN; 652 } 653 if (totlen + off >= MINCLSIZE) { 654 MCLGET(m, M_DONTWAIT); 655 if (m->m_flags & M_EXT) 656 len = MCLBYTES; 657 } else { 658 /* 659 * Place initial small packet/header at end of mbuf. 660 */ 661 if (top == NULL && totlen + off + max_linkhdr <= len) { 662 m->m_data += max_linkhdr; 663 len -= max_linkhdr; 664 } 665 } 666 if (off) { 667 m->m_data += off; 668 len -= off; 669 off = 0; 670 } 671 m->m_len = len = min(totlen, len); 672 if (copy) 673 copy(buf, mtod(m, caddr_t), (unsigned)len); 674 else 675 bcopy(buf, mtod(m, caddr_t), (unsigned)len); 676 buf += len; 677 *mp = m; 678 mp = &m->m_next; 679 totlen -= len; 680 } 681 return (top); 682} 683 684/* 685 * Copy data from a buffer back into the indicated mbuf chain, 686 * starting "off" bytes from the beginning, extending the mbuf 687 * chain if necessary. 
688 */ 689void 690m_copyback(struct mbuf *m0, int off, int len, caddr_t cp) 691{ 692 int mlen; 693 struct mbuf *m = m0, *n; 694 int totlen = 0; 695 696 if (m0 == NULL) 697 return; 698 while (off > (mlen = m->m_len)) { 699 off -= mlen; 700 totlen += mlen; 701 if (m->m_next == NULL) { 702 n = m_get_clrd(M_DONTWAIT, m->m_type); 703 if (n == NULL) 704 goto out; 705 n->m_len = min(MLEN, len + off); 706 m->m_next = n; 707 } 708 m = m->m_next; 709 } 710 while (len > 0) { 711 mlen = min (m->m_len - off, len); 712 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen); 713 cp += mlen; 714 len -= mlen; 715 mlen += off; 716 off = 0; 717 totlen += mlen; 718 if (len == 0) 719 break; 720 if (m->m_next == NULL) { 721 n = m_get(M_DONTWAIT, m->m_type); 722 if (n == NULL) 723 break; 724 n->m_len = min(MLEN, len); 725 m->m_next = n; 726 } 727 m = m->m_next; 728 } 729out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) 730 m->m_pkthdr.len = totlen; 731} 732 733void 734m_print(const struct mbuf *m) 735{ 736 int len; 737 const struct mbuf *m2; 738 739 len = m->m_pkthdr.len; 740 m2 = m; 741 while (len) { 742 printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-"); 743 len -= m2->m_len; 744 m2 = m2->m_next; 745 } 746 return; 747}
|