/* kern_mbuf.c — FreeBSD head/sys/kern, revision 168374 */
1129906Sbmilekic/*- 2141991Sbmilekic * Copyright (c) 2004, 2005, 3141991Sbmilekic * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved. 4129906Sbmilekic * 5129906Sbmilekic * Redistribution and use in source and binary forms, with or without 6129906Sbmilekic * modification, are permitted provided that the following conditions 7129906Sbmilekic * are met: 8129906Sbmilekic * 1. Redistributions of source code must retain the above copyright 9129906Sbmilekic * notice unmodified, this list of conditions and the following 10129906Sbmilekic * disclaimer. 11129906Sbmilekic * 2. Redistributions in binary form must reproduce the above copyright 12129906Sbmilekic * notice, this list of conditions and the following disclaimer in the 13129906Sbmilekic * documentation and/or other materials provided with the distribution. 14129906Sbmilekic * 15129906Sbmilekic * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16129906Sbmilekic * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17129906Sbmilekic * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18129906Sbmilekic * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19129906Sbmilekic * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20129906Sbmilekic * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21129906Sbmilekic * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22129906Sbmilekic * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23129906Sbmilekic * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24129906Sbmilekic * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25129906Sbmilekic * SUCH DAMAGE. 
26129906Sbmilekic */ 27129906Sbmilekic 28129906Sbmilekic#include <sys/cdefs.h> 29129906Sbmilekic__FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 168374 2007-04-04 21:27:01Z kmacy $"); 30129906Sbmilekic 31129906Sbmilekic#include "opt_mac.h" 32129906Sbmilekic#include "opt_param.h" 33129906Sbmilekic 34129906Sbmilekic#include <sys/param.h> 35129906Sbmilekic#include <sys/malloc.h> 36129906Sbmilekic#include <sys/systm.h> 37129906Sbmilekic#include <sys/mbuf.h> 38129906Sbmilekic#include <sys/domain.h> 39129906Sbmilekic#include <sys/eventhandler.h> 40129906Sbmilekic#include <sys/kernel.h> 41129906Sbmilekic#include <sys/protosw.h> 42129906Sbmilekic#include <sys/smp.h> 43129906Sbmilekic#include <sys/sysctl.h> 44129906Sbmilekic 45163606Srwatson#include <security/mac/mac_framework.h> 46163606Srwatson 47129906Sbmilekic#include <vm/vm.h> 48129906Sbmilekic#include <vm/vm_page.h> 49129906Sbmilekic#include <vm/uma.h> 50147537Ssilby#include <vm/uma_int.h> 51147537Ssilby#include <vm/uma_dbg.h> 52129906Sbmilekic 53129906Sbmilekic/* 54129906Sbmilekic * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA 55129906Sbmilekic * Zones. 56129906Sbmilekic * 57129906Sbmilekic * Mbuf Clusters (2K, contiguous) are allocated from the Cluster 58129906Sbmilekic * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the 59129906Sbmilekic * administrator so desires. 60129906Sbmilekic * 61129906Sbmilekic * Mbufs are allocated from a UMA Master Zone called the Mbuf 62129906Sbmilekic * Zone. 63129906Sbmilekic * 64129906Sbmilekic * Additionally, FreeBSD provides a Packet Zone, which it 65129906Sbmilekic * configures as a Secondary Zone to the Mbuf Master Zone, 66129906Sbmilekic * thus sharing backend Slab kegs with the Mbuf Master Zone. 
67129906Sbmilekic * 68129906Sbmilekic * Thus common-case allocations and locking are simplified: 69129906Sbmilekic * 70129906Sbmilekic * m_clget() m_getcl() 71129906Sbmilekic * | | 72129906Sbmilekic * | .------------>[(Packet Cache)] m_get(), m_gethdr() 73129906Sbmilekic * | | [ Packet ] | 74129906Sbmilekic * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ] 75129906Sbmilekic * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ] 76129906Sbmilekic * | \________ | 77129906Sbmilekic * [ Cluster Keg ] \ / 78156023Sglebius * | [ Mbuf Keg ] 79129906Sbmilekic * [ Cluster Slabs ] | 80129906Sbmilekic * | [ Mbuf Slabs ] 81129906Sbmilekic * \____________(VM)_________________/ 82151976Sandre * 83151976Sandre * 84156023Sglebius * Whenever an object is allocated with uma_zalloc() out of 85151976Sandre * one of the Zones its _ctor_ function is executed. The same 86156023Sglebius * for any deallocation through uma_zfree() the _dtor_ function 87151976Sandre * is executed. 88156023Sglebius * 89151976Sandre * Caches are per-CPU and are filled from the Master Zone. 90151976Sandre * 91156023Sglebius * Whenever an object is allocated from the underlying global 92151976Sandre * memory pool it gets pre-initialized with the _zinit_ functions. 93151976Sandre * When the Keg's are overfull objects get decomissioned with 94151976Sandre * _zfini_ functions and free'd back to the global memory pool. 95151976Sandre * 96129906Sbmilekic */ 97129906Sbmilekic 98151976Sandreint nmbclusters; /* limits number of mbuf clusters */ 99155780Sandreint nmbjumbop; /* limits number of page size jumbo clusters */ 100151976Sandreint nmbjumbo9; /* limits number of 9k jumbo clusters */ 101151976Sandreint nmbjumbo16; /* limits number of 16k jumbo clusters */ 102129906Sbmilekicstruct mbstat mbstat; 103129906Sbmilekic 104129906Sbmilekicstatic void 105129906Sbmilekictunable_mbinit(void *dummy) 106129906Sbmilekic{ 107129906Sbmilekic 108129906Sbmilekic /* This has to be done before VM init. 
*/ 109129906Sbmilekic nmbclusters = 1024 + maxusers * 64; 110129906Sbmilekic TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters); 111129906Sbmilekic} 112129906SbmilekicSYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL); 113129906Sbmilekic 114151976Sandre/* XXX: These should be tuneables. Can't change UMA limits on the fly. */ 115157927Spsstatic int 116157927Spssysctl_nmbclusters(SYSCTL_HANDLER_ARGS) 117157927Sps{ 118157927Sps int error, newnmbclusters; 119157927Sps 120157927Sps newnmbclusters = nmbclusters; 121157927Sps error = sysctl_handle_int(oidp, &newnmbclusters, sizeof(int), req); 122157927Sps if (error == 0 && req->newptr) { 123157927Sps if (newnmbclusters > nmbclusters) { 124157927Sps nmbclusters = newnmbclusters; 125157927Sps uma_zone_set_max(zone_clust, nmbclusters); 126157927Sps EVENTHANDLER_INVOKE(nmbclusters_change); 127157927Sps } else 128157927Sps error = EINVAL; 129157927Sps } 130157927Sps return (error); 131157927Sps} 132157927SpsSYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW, 133157927Sps&nmbclusters, 0, sysctl_nmbclusters, "IU", 134129906Sbmilekic "Maximum number of mbuf clusters allowed"); 135155780SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbop, CTLFLAG_RW, &nmbjumbop, 0, 136155780Sandre "Maximum number of mbuf page size jumbo clusters allowed"); 137151976SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo9, CTLFLAG_RW, &nmbjumbo9, 0, 138151976Sandre "Maximum number of mbuf 9k jumbo clusters allowed"); 139151976SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo16, CTLFLAG_RW, &nmbjumbo16, 0, 140151976Sandre "Maximum number of mbuf 16k jumbo clusters allowed"); 141129906SbmilekicSYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat, 142129906Sbmilekic "Mbuf general information and statistics"); 143129906Sbmilekic 144129906Sbmilekic/* 145129906Sbmilekic * Zones from which we allocate. 
146129906Sbmilekic */ 147129906Sbmilekicuma_zone_t zone_mbuf; 148129906Sbmilekicuma_zone_t zone_clust; 149129906Sbmilekicuma_zone_t zone_pack; 150155780Sandreuma_zone_t zone_jumbop; 151151976Sandreuma_zone_t zone_jumbo9; 152151976Sandreuma_zone_t zone_jumbo16; 153151976Sandreuma_zone_t zone_ext_refcnt; 154129906Sbmilekic 155129906Sbmilekic/* 156129906Sbmilekic * Local prototypes. 157129906Sbmilekic */ 158132987Sgreenstatic int mb_ctor_mbuf(void *, int, void *, int); 159132987Sgreenstatic int mb_ctor_clust(void *, int, void *, int); 160132987Sgreenstatic int mb_ctor_pack(void *, int, void *, int); 161129906Sbmilekicstatic void mb_dtor_mbuf(void *, int, void *); 162151976Sandrestatic void mb_dtor_clust(void *, int, void *); 163151976Sandrestatic void mb_dtor_pack(void *, int, void *); 164151976Sandrestatic int mb_zinit_pack(void *, int, int); 165151976Sandrestatic void mb_zfini_pack(void *, int); 166129906Sbmilekic 167129906Sbmilekicstatic void mb_reclaim(void *); 168129906Sbmilekicstatic void mbuf_init(void *); 169129906Sbmilekic 170135510Sbrian/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */ 171135510SbrianCTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE); 172135510Sbrian 173129906Sbmilekic/* 174129906Sbmilekic * Initialize FreeBSD Network buffer allocation. 175129906Sbmilekic */ 176129906SbmilekicSYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL) 177129906Sbmilekicstatic void 178129906Sbmilekicmbuf_init(void *dummy) 179129906Sbmilekic{ 180129906Sbmilekic 181129906Sbmilekic /* 182129906Sbmilekic * Configure UMA zones for Mbufs, Clusters, and Packets. 
183129906Sbmilekic */ 184151976Sandre zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE, 185151976Sandre mb_ctor_mbuf, mb_dtor_mbuf, 186147537Ssilby#ifdef INVARIANTS 187151976Sandre trash_init, trash_fini, 188147537Ssilby#else 189151976Sandre NULL, NULL, 190147537Ssilby#endif 191151976Sandre MSIZE - 1, UMA_ZONE_MAXBUCKET); 192151976Sandre 193148095Srwatson zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 194151976Sandre mb_ctor_clust, mb_dtor_clust, 195147537Ssilby#ifdef INVARIANTS 196151976Sandre trash_init, trash_fini, 197147537Ssilby#else 198151976Sandre NULL, NULL, 199147537Ssilby#endif 200151976Sandre UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 201129906Sbmilekic if (nmbclusters > 0) 202129906Sbmilekic uma_zone_set_max(zone_clust, nmbclusters); 203151976Sandre 204148095Srwatson zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack, 205151976Sandre mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf); 206129906Sbmilekic 207156023Sglebius /* Make jumbo frame zone too. Page size, 9k and 16k. 
*/ 208155780Sandre zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE, 209153232Sandre mb_ctor_clust, mb_dtor_clust, 210153232Sandre#ifdef INVARIANTS 211153232Sandre trash_init, trash_fini, 212153232Sandre#else 213153232Sandre NULL, NULL, 214153232Sandre#endif 215153232Sandre UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 216155780Sandre if (nmbjumbop > 0) 217155780Sandre uma_zone_set_max(zone_jumbop, nmbjumbop); 218153232Sandre 219151976Sandre zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES, 220151976Sandre mb_ctor_clust, mb_dtor_clust, 221151976Sandre#ifdef INVARIANTS 222151976Sandre trash_init, trash_fini, 223151976Sandre#else 224151976Sandre NULL, NULL, 225151976Sandre#endif 226151976Sandre UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 227151976Sandre if (nmbjumbo9 > 0) 228151976Sandre uma_zone_set_max(zone_jumbo9, nmbjumbo9); 229129906Sbmilekic 230151976Sandre zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES, 231151976Sandre mb_ctor_clust, mb_dtor_clust, 232151976Sandre#ifdef INVARIANTS 233151976Sandre trash_init, trash_fini, 234151976Sandre#else 235151976Sandre NULL, NULL, 236151976Sandre#endif 237151976Sandre UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 238151976Sandre if (nmbjumbo16 > 0) 239151976Sandre uma_zone_set_max(zone_jumbo16, nmbjumbo16); 240151976Sandre 241151976Sandre zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 242151976Sandre NULL, NULL, 243151976Sandre NULL, NULL, 244151976Sandre UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 245151976Sandre 246151976Sandre /* uma_prealloc() goes here... */ 247151976Sandre 248129906Sbmilekic /* 249129906Sbmilekic * Hook event handler for low-memory situation, used to 250129906Sbmilekic * drain protocols and push data back to the caches (UMA 251129906Sbmilekic * later pushes it back to VM). 
252129906Sbmilekic */ 253129906Sbmilekic EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, 254129906Sbmilekic EVENTHANDLER_PRI_FIRST); 255129906Sbmilekic 256129906Sbmilekic /* 257129906Sbmilekic * [Re]set counters and local statistics knobs. 258129906Sbmilekic * XXX Some of these should go and be replaced, but UMA stat 259129906Sbmilekic * gathering needs to be revised. 260129906Sbmilekic */ 261129906Sbmilekic mbstat.m_mbufs = 0; 262129906Sbmilekic mbstat.m_mclusts = 0; 263129906Sbmilekic mbstat.m_drain = 0; 264129906Sbmilekic mbstat.m_msize = MSIZE; 265129906Sbmilekic mbstat.m_mclbytes = MCLBYTES; 266129906Sbmilekic mbstat.m_minclsize = MINCLSIZE; 267129906Sbmilekic mbstat.m_mlen = MLEN; 268129906Sbmilekic mbstat.m_mhlen = MHLEN; 269129906Sbmilekic mbstat.m_numtypes = MT_NTYPES; 270129906Sbmilekic 271129906Sbmilekic mbstat.m_mcfail = mbstat.m_mpfail = 0; 272129906Sbmilekic mbstat.sf_iocnt = 0; 273129906Sbmilekic mbstat.sf_allocwait = mbstat.sf_allocfail = 0; 274129906Sbmilekic} 275129906Sbmilekic 276129906Sbmilekic/* 277129906Sbmilekic * Constructor for Mbuf master zone. 278129906Sbmilekic * 279129906Sbmilekic * The 'arg' pointer points to a mb_args structure which 280129906Sbmilekic * contains call-specific information required to support the 281151976Sandre * mbuf allocation API. See mbuf.h. 
282129906Sbmilekic */ 283132987Sgreenstatic int 284132987Sgreenmb_ctor_mbuf(void *mem, int size, void *arg, int how) 285129906Sbmilekic{ 286129906Sbmilekic struct mbuf *m; 287129906Sbmilekic struct mb_args *args; 288132987Sgreen#ifdef MAC 289132987Sgreen int error; 290132987Sgreen#endif 291129906Sbmilekic int flags; 292129906Sbmilekic short type; 293129906Sbmilekic 294147537Ssilby#ifdef INVARIANTS 295147537Ssilby trash_ctor(mem, size, arg, how); 296147537Ssilby#endif 297129906Sbmilekic m = (struct mbuf *)mem; 298129906Sbmilekic args = (struct mb_args *)arg; 299129906Sbmilekic flags = args->flags; 300129906Sbmilekic type = args->type; 301129906Sbmilekic 302151976Sandre /* 303151976Sandre * The mbuf is initialized later. The caller has the 304156059Sglebius * responsibility to set up any MAC labels too. 305151976Sandre */ 306151976Sandre if (type == MT_NOINIT) 307151976Sandre return (0); 308151976Sandre 309129906Sbmilekic m->m_next = NULL; 310129906Sbmilekic m->m_nextpkt = NULL; 311151976Sandre m->m_len = 0; 312129947Sbmilekic m->m_flags = flags; 313151976Sandre m->m_type = type; 314129906Sbmilekic if (flags & M_PKTHDR) { 315129906Sbmilekic m->m_data = m->m_pktdat; 316129906Sbmilekic m->m_pkthdr.rcvif = NULL; 317151976Sandre m->m_pkthdr.len = 0; 318151976Sandre m->m_pkthdr.header = NULL; 319129906Sbmilekic m->m_pkthdr.csum_flags = 0; 320151976Sandre m->m_pkthdr.csum_data = 0; 321162377Sandre m->m_pkthdr.tso_segsz = 0; 322162377Sandre m->m_pkthdr.ether_vtag = 0; 323129906Sbmilekic SLIST_INIT(&m->m_pkthdr.tags); 324129906Sbmilekic#ifdef MAC 325129906Sbmilekic /* If the label init fails, fail the alloc */ 326132987Sgreen error = mac_init_mbuf(m, how); 327132987Sgreen if (error) 328132987Sgreen return (error); 329129906Sbmilekic#endif 330129947Sbmilekic } else 331129906Sbmilekic m->m_data = m->m_dat; 332132987Sgreen return (0); 333129906Sbmilekic} 334129906Sbmilekic 335129906Sbmilekic/* 336151976Sandre * The Mbuf master zone destructor. 
337129906Sbmilekic */ 338129906Sbmilekicstatic void 339129906Sbmilekicmb_dtor_mbuf(void *mem, int size, void *arg) 340129906Sbmilekic{ 341129906Sbmilekic struct mbuf *m; 342129906Sbmilekic 343129906Sbmilekic m = (struct mbuf *)mem; 344129906Sbmilekic if ((m->m_flags & M_PKTHDR) != 0) 345129906Sbmilekic m_tag_delete_chain(m, NULL); 346151976Sandre KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__)); 347147537Ssilby#ifdef INVARIANTS 348147537Ssilby trash_dtor(mem, size, arg); 349147537Ssilby#endif 350129906Sbmilekic} 351129906Sbmilekic 352151976Sandre/* 353151976Sandre * The Mbuf Packet zone destructor. 354151976Sandre */ 355129906Sbmilekicstatic void 356129906Sbmilekicmb_dtor_pack(void *mem, int size, void *arg) 357129906Sbmilekic{ 358129906Sbmilekic struct mbuf *m; 359129906Sbmilekic 360129906Sbmilekic m = (struct mbuf *)mem; 361129906Sbmilekic if ((m->m_flags & M_PKTHDR) != 0) 362129906Sbmilekic m_tag_delete_chain(m, NULL); 363151976Sandre 364151976Sandre /* Make sure we've got a clean cluster back. */ 365151976Sandre KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); 366151976Sandre KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__)); 367151976Sandre KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__)); 368151976Sandre KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__)); 369151976Sandre KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__)); 370152130Sglebius KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__)); 371151976Sandre KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__)); 372147537Ssilby#ifdef INVARIANTS 373147537Ssilby trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg); 374147537Ssilby#endif 375166213Smohans /* 376166213Smohans * If there are processes blocked on zone_clust, waiting for pages to be freed up, 377166213Smohans * cause them to be woken up by draining the packet zone. 
We are exposed to a race here 378166213Smohans * (in the check for the UMA_ZFLAG_FULL) where we might miss the flag set, but that is 379166213Smohans * deliberate. We don't want to acquire the zone lock for every mbuf free. 380166213Smohans */ 381166213Smohans if (uma_zone_exhausted_nolock(zone_clust)) 382166213Smohans zone_drain(zone_pack); 383129906Sbmilekic} 384129906Sbmilekic 385129906Sbmilekic/* 386155780Sandre * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor. 387129906Sbmilekic * 388129906Sbmilekic * Here the 'arg' pointer points to the Mbuf which we 389151976Sandre * are configuring cluster storage for. If 'arg' is 390151976Sandre * empty we allocate just the cluster without setting 391151976Sandre * the mbuf to it. See mbuf.h. 392129906Sbmilekic */ 393132987Sgreenstatic int 394132987Sgreenmb_ctor_clust(void *mem, int size, void *arg, int how) 395129906Sbmilekic{ 396129906Sbmilekic struct mbuf *m; 397151976Sandre u_int *refcnt; 398168374Skmacy int type; 399168374Skmacy uma_zone_t zone; 400168374Skmacy 401147537Ssilby#ifdef INVARIANTS 402147537Ssilby trash_ctor(mem, size, arg, how); 403147537Ssilby#endif 404168374Skmacy switch (size) { 405168374Skmacy case MCLBYTES: 406168374Skmacy type = EXT_CLUSTER; 407168374Skmacy zone = zone_clust; 408168374Skmacy break; 409168374Skmacy#if MJUMPAGESIZE != MCLBYTES 410168374Skmacy case MJUMPAGESIZE: 411168374Skmacy type = EXT_JUMBOP; 412168374Skmacy zone = zone_jumbop; 413168374Skmacy break; 414168374Skmacy#endif 415168374Skmacy case MJUM9BYTES: 416168374Skmacy type = EXT_JUMBO9; 417168374Skmacy zone = zone_jumbo9; 418168374Skmacy break; 419168374Skmacy case MJUM16BYTES: 420168374Skmacy type = EXT_JUMBO16; 421168374Skmacy zone = zone_jumbo16; 422168374Skmacy break; 423168374Skmacy default: 424168374Skmacy panic("unknown cluster size"); 425168374Skmacy break; 426168374Skmacy } 427168374Skmacy 428129906Sbmilekic m = (struct mbuf *)arg; 429168374Skmacy refcnt = uma_find_refcnt(zone, mem); 430168374Skmacy *refcnt = 1; 
431151976Sandre if (m != NULL) { 432151976Sandre m->m_ext.ext_buf = (caddr_t)mem; 433151976Sandre m->m_data = m->m_ext.ext_buf; 434151976Sandre m->m_flags |= M_EXT; 435151976Sandre m->m_ext.ext_free = NULL; 436151976Sandre m->m_ext.ext_args = NULL; 437151976Sandre m->m_ext.ext_size = size; 438151976Sandre m->m_ext.ext_type = type; 439168374Skmacy m->m_ext.ref_cnt = refcnt; 440151976Sandre } 441168374Skmacy 442132987Sgreen return (0); 443129906Sbmilekic} 444129906Sbmilekic 445151976Sandre/* 446151976Sandre * The Mbuf Cluster zone destructor. 447151976Sandre */ 448129906Sbmilekicstatic void 449129906Sbmilekicmb_dtor_clust(void *mem, int size, void *arg) 450129906Sbmilekic{ 451168374Skmacy#ifdef INVARIANTS 452168374Skmacy uma_zone_t zone; 453151976Sandre 454168374Skmacy zone = m_getzone(size); 455168374Skmacy KASSERT(*(uma_find_refcnt(zone, mem)) <= 1, 456152035Sandre ("%s: refcnt incorrect %u", __func__, 457168374Skmacy *(uma_find_refcnt(zone, mem))) ); 458168374Skmacy 459147537Ssilby trash_dtor(mem, size, arg); 460147537Ssilby#endif 461129906Sbmilekic} 462129906Sbmilekic 463129906Sbmilekic/* 464129906Sbmilekic * The Packet secondary zone's init routine, executed on the 465151976Sandre * object's transition from mbuf keg slab to zone cache. 466129906Sbmilekic */ 467132987Sgreenstatic int 468151976Sandremb_zinit_pack(void *mem, int size, int how) 469129906Sbmilekic{ 470129906Sbmilekic struct mbuf *m; 471129906Sbmilekic 472151976Sandre m = (struct mbuf *)mem; /* m is virgin. */ 473156428Sandre if (uma_zalloc_arg(zone_clust, m, how) == NULL || 474156428Sandre m->m_ext.ext_buf == NULL) 475132987Sgreen return (ENOMEM); 476152101Sandre m->m_ext.ext_type = EXT_PACKET; /* Override. 
*/ 477147537Ssilby#ifdef INVARIANTS 478147537Ssilby trash_init(m->m_ext.ext_buf, MCLBYTES, how); 479147537Ssilby#endif 480132987Sgreen return (0); 481129906Sbmilekic} 482129906Sbmilekic 483129906Sbmilekic/* 484129906Sbmilekic * The Packet secondary zone's fini routine, executed on the 485129906Sbmilekic * object's transition from zone cache to keg slab. 486129906Sbmilekic */ 487129906Sbmilekicstatic void 488151976Sandremb_zfini_pack(void *mem, int size) 489129906Sbmilekic{ 490129906Sbmilekic struct mbuf *m; 491129906Sbmilekic 492129906Sbmilekic m = (struct mbuf *)mem; 493147537Ssilby#ifdef INVARIANTS 494147537Ssilby trash_fini(m->m_ext.ext_buf, MCLBYTES); 495147537Ssilby#endif 496129906Sbmilekic uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL); 497147652Ssilby#ifdef INVARIANTS 498147652Ssilby trash_dtor(mem, size, NULL); 499147652Ssilby#endif 500129906Sbmilekic} 501129906Sbmilekic 502129906Sbmilekic/* 503129906Sbmilekic * The "packet" keg constructor. 504129906Sbmilekic */ 505132987Sgreenstatic int 506132987Sgreenmb_ctor_pack(void *mem, int size, void *arg, int how) 507129906Sbmilekic{ 508129906Sbmilekic struct mbuf *m; 509129906Sbmilekic struct mb_args *args; 510132987Sgreen#ifdef MAC 511132987Sgreen int error; 512132987Sgreen#endif 513132987Sgreen int flags; 514129906Sbmilekic short type; 515129906Sbmilekic 516129906Sbmilekic m = (struct mbuf *)mem; 517129906Sbmilekic args = (struct mb_args *)arg; 518129906Sbmilekic flags = args->flags; 519129906Sbmilekic type = args->type; 520129906Sbmilekic 521147537Ssilby#ifdef INVARIANTS 522147537Ssilby trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how); 523147537Ssilby#endif 524129906Sbmilekic m->m_next = NULL; 525129947Sbmilekic m->m_nextpkt = NULL; 526129906Sbmilekic m->m_data = m->m_ext.ext_buf; 527151976Sandre m->m_len = 0; 528151976Sandre m->m_flags = (flags | M_EXT); 529151976Sandre m->m_type = type; 530129906Sbmilekic 531129906Sbmilekic if (flags & M_PKTHDR) { 532129906Sbmilekic m->m_pkthdr.rcvif = NULL; 
533151976Sandre m->m_pkthdr.len = 0; 534151976Sandre m->m_pkthdr.header = NULL; 535129906Sbmilekic m->m_pkthdr.csum_flags = 0; 536151976Sandre m->m_pkthdr.csum_data = 0; 537162377Sandre m->m_pkthdr.tso_segsz = 0; 538162377Sandre m->m_pkthdr.ether_vtag = 0; 539129906Sbmilekic SLIST_INIT(&m->m_pkthdr.tags); 540129906Sbmilekic#ifdef MAC 541129906Sbmilekic /* If the label init fails, fail the alloc */ 542132987Sgreen error = mac_init_mbuf(m, how); 543132987Sgreen if (error) 544132987Sgreen return (error); 545129906Sbmilekic#endif 546129906Sbmilekic } 547151976Sandre /* m_ext is already initialized. */ 548151976Sandre 549132987Sgreen return (0); 550129906Sbmilekic} 551129906Sbmilekic 552129906Sbmilekic/* 553129906Sbmilekic * This is the protocol drain routine. 554129906Sbmilekic * 555129906Sbmilekic * No locks should be held when this is called. The drain routines have to 556129906Sbmilekic * presently acquire some locks which raises the possibility of lock order 557129906Sbmilekic * reversal. 558129906Sbmilekic */ 559129906Sbmilekicstatic void 560129906Sbmilekicmb_reclaim(void *junk) 561129906Sbmilekic{ 562129906Sbmilekic struct domain *dp; 563129906Sbmilekic struct protosw *pr; 564129906Sbmilekic 565129906Sbmilekic WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, 566129906Sbmilekic "mb_reclaim()"); 567129906Sbmilekic 568129906Sbmilekic for (dp = domains; dp != NULL; dp = dp->dom_next) 569129906Sbmilekic for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) 570129906Sbmilekic if (pr->pr_drain != NULL) 571129906Sbmilekic (*pr->pr_drain)(); 572129906Sbmilekic} 573