static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
        int error, newnmbclusters;

        newnmbclusters = nmbclusters;
        error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
        if (error == 0 && req->newptr) {
                if (newnmbclusters > nmbclusters) {
                        nmbclusters = newnmbclusters;
                        uma_zone_set_max(zone_clust, nmbclusters);
                        EVENTHANDLER_INVOKE(nmbclusters_change);
                } else
                        error = EINVAL;
        }
        return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
        int error, newnmbjumbop;

        newnmbjumbop = nmbjumbop;
        error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
        if (error == 0 && req->newptr) {
                if (newnmbjumbop > nmbjumbop) {
                        nmbjumbop = newnmbjumbop;
                        uma_zone_set_max(zone_jumbop, nmbjumbop);
                } else
                        error = EINVAL;
        }
        return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
        int error, newnmbjumbo9;

        newnmbjumbo9 = nmbjumbo9;
        error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
        if (error == 0 && req->newptr) {
                if (newnmbjumbo9 > nmbjumbo9) {
                        nmbjumbo9 = newnmbjumbo9;
                        uma_zone_set_max(zone_jumbo9, nmbjumbo9);
                } else
                        error = EINVAL;
        }
        return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
        int error, newnmbjumbo16;

        newnmbjumbo16 = nmbjumbo16;
        error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
        if (error == 0 && req->newptr) {
                if (newnmbjumbo16 > nmbjumbo16) {
                        nmbjumbo16 = newnmbjumbo16;
                        uma_zone_set_max(zone_jumbo16, nmbjumbo16);
                } else
                        error = EINVAL;
        }
        return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");
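
/*
 * Editorial note (not in the original source): the handlers above only
 * allow the limits to be raised while the system is running.  A rough
 * usage sketch from userland, assuming the standard sysctl(8) utility:
 *
 *      sysctl kern.ipc.nmbclusters=131072   # larger than current: accepted
 *      sysctl kern.ipc.nmbclusters=1024     # not larger: rejected, EINVAL
 *
 * Lowering a limit is expected to be done through the corresponding loader
 * tunable (e.g. kern.ipc.nmbclusters in loader.conf) followed by a reboot.
 */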

/*
 * Zones from which we allocate.
 */
uma_zone_t zone_mbuf;
uma_zone_t zone_clust;
uma_zone_t zone_pack;
uma_zone_t zone_jumbop;
uma_zone_t zone_jumbo9;
uma_zone_t zone_jumbo16;
uma_zone_t zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int mb_ctor_mbuf(void *, int, void *, int);
static int mb_ctor_clust(void *, int, void *, int);
static int mb_ctor_pack(void *, int, void *, int);
static void mb_dtor_mbuf(void *, int, void *);
static void mb_dtor_clust(void *, int, void *);
static void mb_dtor_pack(void *, int, void *);
static int mb_zinit_pack(void *, int, int);
static void mb_zfini_pack(void *, int);

static void mb_reclaim(void *);
static void mbuf_init(void *);
static void *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
static void mbuf_jumbo_free(void *, int, u_int8_t);

static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
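
/*
 * Editorial note: a worked example of the assertion above.  For
 * MSIZE = 256, ((256 - 1) ^ 256) = 511 and (511 + 1) >> 1 = 256, so the
 * check passes.  For a non-power-of-2 value such as 384,
 * ((384 - 1) ^ 384) = 255 and (255 + 1) >> 1 = 128 != 384, so the
 * CTASSERT would fail at compile time.
 */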

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
static void
mbuf_init(void *dummy)
{

        /*
         * Configure UMA zones for Mbufs, Clusters, and Packets.
         */
        zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
            mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            MSIZE - 1, UMA_ZONE_MAXBUCKET);

        zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbclusters > 0)
                uma_zone_set_max(zone_clust, nmbclusters);

        zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
            mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

        /* Make jumbo frame zone too. Page size, 9k and 16k. */
        zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbop > 0)
                uma_zone_set_max(zone_jumbop, nmbjumbop);

        zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbo9 > 0)
                uma_zone_set_max(zone_jumbo9, nmbjumbo9);
        uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
        uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);

        zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbo16 > 0)
                uma_zone_set_max(zone_jumbo16, nmbjumbo16);
        uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
        uma_zone_set_freef(zone_jumbo16, mbuf_jumbo_free);

        zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
            NULL, NULL,
            NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

        /* uma_prealloc() goes here... */

        /*
         * Hook event handler for low-memory situation, used to
         * drain protocols and push data back to the caches (UMA
         * later pushes it back to VM).
         */
        EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
            EVENTHANDLER_PRI_FIRST);

        /*
         * [Re]set counters and local statistics knobs.
         * XXX Some of these should go and be replaced, but UMA stat
         * gathering needs to be revised.
         */
        mbstat.m_mbufs = 0;
        mbstat.m_mclusts = 0;
        mbstat.m_drain = 0;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;
        mbstat.m_numtypes = MT_NTYPES;

        mbstat.m_mcfail = mbstat.m_mpfail = 0;
        mbstat.sf_iocnt = 0;
        mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

        /* Inform UMA that this allocator uses kernel_map/object. */
        *flags = UMA_SLAB_KERNEL;
        return (contigmalloc(bytes, M_JUMBOFRAME, wait, (vm_paddr_t)0,
            ~(vm_paddr_t)0, 1, 0));
}

/*
 * UMA backend page deallocator for the jumbo frame zones.
 */
static void
mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
{

        contigfree(mem, size, M_JUMBOFRAME);
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

#ifdef INVARIANTS
        trash_ctor(mem, size, arg, how);
#endif
        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

        /*
         * The mbuf is initialized later.  The caller has the
         * responsibility to set up any MAC labels too.
         */
        if (type == MT_NOINIT)
                return (0);

        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_len = 0;
        m->m_flags = flags;
        m->m_type = type;
        if (flags & M_PKTHDR) {
                m->m_data = m->m_pktdat;
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.header = NULL;
                m->m_pkthdr.len = 0;
                m->m_pkthdr.csum_flags = 0;
                m->m_pkthdr.csum_data = 0;
                m->m_pkthdr.tso_segsz = 0;
                m->m_pkthdr.ether_vtag = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_mbuf_init(m, how);
                if (error)
                        return (error);
#endif
        } else
                m->m_data = m->m_dat;
        return (0);
}
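
/*
 * Editorial sketch (see the inline allocators in sys/mbuf.h for the
 * authoritative code): a plain mbuf allocation typically reaches this
 * constructor roughly as follows,
 *
 *      struct mb_args args;
 *      struct mbuf *m;
 *
 *      args.flags = 0;
 *      args.type = MT_DATA;
 *      m = uma_zalloc_arg(zone_mbuf, &args, M_NOWAIT);
 *
 * i.e. the caller packs the flags and type into a struct mb_args and UMA
 * passes that pointer to mb_ctor_mbuf() as 'arg'.
 */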

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
        struct mbuf *m;
        unsigned long flags;

        m = (struct mbuf *)mem;
        flags = (unsigned long)arg;

        if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);
        KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
        KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
        trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        if ((m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);

        /* Make sure we've got a clean cluster back. */
        KASSERT((m->m_flags & M_EXT) == M_EXT,
            ("%s: M_EXT not set", __func__));
        KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
        KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
        KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
        KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
        KASSERT(m->m_ext.ext_size == MCLBYTES,
            ("%s: ext_size != MCLBYTES", __func__));
        KASSERT(m->m_ext.ext_type == EXT_PACKET,
            ("%s: ext_type != EXT_PACKET", __func__));
        KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
        trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
        /*
         * If there are processes blocked on zone_clust, waiting for pages
         * to be freed up, cause them to be woken up by draining the
         * packet zone.  We are exposed to a race here (in the check for
         * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
         * is deliberate.  We don't want to acquire the zone lock for every
         * mbuf free.
         */
        if (uma_zone_exhausted_nolock(zone_clust))
                zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        u_int *refcnt;
        int type;
        uma_zone_t zone;

#ifdef INVARIANTS
        trash_ctor(mem, size, arg, how);
#endif
        switch (size) {
        case MCLBYTES:
                type = EXT_CLUSTER;
                zone = zone_clust;
                break;
#if MJUMPAGESIZE != MCLBYTES
        case MJUMPAGESIZE:
                type = EXT_JUMBOP;
                zone = zone_jumbop;
                break;
#endif
        case MJUM9BYTES:
                type = EXT_JUMBO9;
                zone = zone_jumbo9;
                break;
        case MJUM16BYTES:
                type = EXT_JUMBO16;
                zone = zone_jumbo16;
                break;
        default:
                panic("unknown cluster size");
                break;
        }

        m = (struct mbuf *)arg;
        refcnt = uma_find_refcnt(zone, mem);
        *refcnt = 1;
        if (m != NULL) {
                m->m_ext.ext_buf = (caddr_t)mem;
                m->m_data = m->m_ext.ext_buf;
                m->m_flags |= M_EXT;
                m->m_ext.ext_free = NULL;
                m->m_ext.ext_arg1 = NULL;
                m->m_ext.ext_arg2 = NULL;
                m->m_ext.ext_size = size;
                m->m_ext.ext_type = type;
                m->m_ext.ref_cnt = refcnt;
        }

        return (0);
}

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
        uma_zone_t zone;

        zone = m_getzone(size);
        KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
            ("%s: refcnt incorrect %u", __func__,
            *(uma_find_refcnt(zone, mem))));

        trash_dtor(mem, size, arg);
#endif
}
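
/*
 * Editorial sketch (see m_clget() and friends in sys/mbuf.h for the
 * authoritative code): attaching a cluster to an existing mbuf typically
 * looks roughly like
 *
 *      m->m_ext.ext_buf = NULL;
 *      uma_zalloc_arg(zone_clust, m, M_NOWAIT);
 *      if (m->m_ext.ext_buf == NULL)
 *              ... allocation failed, no cluster attached ...
 *
 * Passing the mbuf as the allocation argument lets mb_ctor_clust() wire up
 * m_ext and set M_EXT; passing NULL allocates a bare cluster instead.
 */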

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;         /* m is virgin. */
        if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
            m->m_ext.ext_buf == NULL)
                return (ENOMEM);
        m->m_ext.ext_type = EXT_PACKET; /* Override. */
#ifdef INVARIANTS
        trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
        return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
#ifdef INVARIANTS
        trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
        uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
        trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

#ifdef INVARIANTS
        trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_data = m->m_ext.ext_buf;
        m->m_len = 0;
        m->m_flags = (flags | M_EXT);
        m->m_type = type;

        if (flags & M_PKTHDR) {
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.len = 0;
                m->m_pkthdr.header = NULL;
                m->m_pkthdr.csum_flags = 0;
                m->m_pkthdr.csum_data = 0;
                m->m_pkthdr.tso_segsz = 0;
                m->m_pkthdr.ether_vtag = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_mbuf_init(m, how);
                if (error)
                        return (error);
#endif
        }
        /* m_ext is already initialized. */

        return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
        struct domain *dp;
        struct protosw *pr;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
            "mb_reclaim()");

        for (dp = domains; dp != NULL; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain != NULL)
                                (*pr->pr_drain)();
}