/* kern_mbuf.c — FreeBSD revision 174247 */
1129906Sbmilekic/*- 2141991Sbmilekic * Copyright (c) 2004, 2005, 3141991Sbmilekic * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved. 4129906Sbmilekic * 5129906Sbmilekic * Redistribution and use in source and binary forms, with or without 6129906Sbmilekic * modification, are permitted provided that the following conditions 7129906Sbmilekic * are met: 8129906Sbmilekic * 1. Redistributions of source code must retain the above copyright 9129906Sbmilekic * notice unmodified, this list of conditions and the following 10129906Sbmilekic * disclaimer. 11129906Sbmilekic * 2. Redistributions in binary form must reproduce the above copyright 12129906Sbmilekic * notice, this list of conditions and the following disclaimer in the 13129906Sbmilekic * documentation and/or other materials provided with the distribution. 14129906Sbmilekic * 15129906Sbmilekic * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16129906Sbmilekic * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17129906Sbmilekic * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18129906Sbmilekic * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19129906Sbmilekic * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20129906Sbmilekic * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21129906Sbmilekic * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22129906Sbmilekic * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23129906Sbmilekic * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24129906Sbmilekic * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25129906Sbmilekic * SUCH DAMAGE. 
26129906Sbmilekic */ 27129906Sbmilekic 28129906Sbmilekic#include <sys/cdefs.h> 29129906Sbmilekic__FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 174247 2007-12-04 07:06:08Z alc $"); 30129906Sbmilekic 31129906Sbmilekic#include "opt_mac.h" 32129906Sbmilekic#include "opt_param.h" 33129906Sbmilekic 34129906Sbmilekic#include <sys/param.h> 35129906Sbmilekic#include <sys/malloc.h> 36129906Sbmilekic#include <sys/systm.h> 37129906Sbmilekic#include <sys/mbuf.h> 38129906Sbmilekic#include <sys/domain.h> 39129906Sbmilekic#include <sys/eventhandler.h> 40129906Sbmilekic#include <sys/kernel.h> 41129906Sbmilekic#include <sys/protosw.h> 42129906Sbmilekic#include <sys/smp.h> 43129906Sbmilekic#include <sys/sysctl.h> 44129906Sbmilekic 45163606Srwatson#include <security/mac/mac_framework.h> 46163606Srwatson 47129906Sbmilekic#include <vm/vm.h> 48129906Sbmilekic#include <vm/vm_page.h> 49129906Sbmilekic#include <vm/uma.h> 50147537Ssilby#include <vm/uma_int.h> 51147537Ssilby#include <vm/uma_dbg.h> 52129906Sbmilekic 53129906Sbmilekic/* 54129906Sbmilekic * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA 55129906Sbmilekic * Zones. 56129906Sbmilekic * 57129906Sbmilekic * Mbuf Clusters (2K, contiguous) are allocated from the Cluster 58129906Sbmilekic * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the 59129906Sbmilekic * administrator so desires. 60129906Sbmilekic * 61129906Sbmilekic * Mbufs are allocated from a UMA Master Zone called the Mbuf 62129906Sbmilekic * Zone. 63129906Sbmilekic * 64129906Sbmilekic * Additionally, FreeBSD provides a Packet Zone, which it 65129906Sbmilekic * configures as a Secondary Zone to the Mbuf Master Zone, 66129906Sbmilekic * thus sharing backend Slab kegs with the Mbuf Master Zone. 
67129906Sbmilekic * 68129906Sbmilekic * Thus common-case allocations and locking are simplified: 69129906Sbmilekic * 70129906Sbmilekic * m_clget() m_getcl() 71129906Sbmilekic * | | 72129906Sbmilekic * | .------------>[(Packet Cache)] m_get(), m_gethdr() 73129906Sbmilekic * | | [ Packet ] | 74129906Sbmilekic * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ] 75129906Sbmilekic * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ] 76129906Sbmilekic * | \________ | 77129906Sbmilekic * [ Cluster Keg ] \ / 78156023Sglebius * | [ Mbuf Keg ] 79129906Sbmilekic * [ Cluster Slabs ] | 80129906Sbmilekic * | [ Mbuf Slabs ] 81129906Sbmilekic * \____________(VM)_________________/ 82151976Sandre * 83151976Sandre * 84156023Sglebius * Whenever an object is allocated with uma_zalloc() out of 85151976Sandre * one of the Zones its _ctor_ function is executed. The same 86156023Sglebius * for any deallocation through uma_zfree() the _dtor_ function 87151976Sandre * is executed. 88156023Sglebius * 89151976Sandre * Caches are per-CPU and are filled from the Master Zone. 90151976Sandre * 91156023Sglebius * Whenever an object is allocated from the underlying global 92151976Sandre * memory pool it gets pre-initialized with the _zinit_ functions. 93151976Sandre * When the Keg's are overfull objects get decomissioned with 94151976Sandre * _zfini_ functions and free'd back to the global memory pool. 95151976Sandre * 96129906Sbmilekic */ 97129906Sbmilekic 98151976Sandreint nmbclusters; /* limits number of mbuf clusters */ 99155780Sandreint nmbjumbop; /* limits number of page size jumbo clusters */ 100151976Sandreint nmbjumbo9; /* limits number of 9k jumbo clusters */ 101151976Sandreint nmbjumbo16; /* limits number of 16k jumbo clusters */ 102129906Sbmilekicstruct mbstat mbstat; 103129906Sbmilekic 104129906Sbmilekicstatic void 105129906Sbmilekictunable_mbinit(void *dummy) 106129906Sbmilekic{ 107129906Sbmilekic 108129906Sbmilekic /* This has to be done before VM init. 
*/ 109129906Sbmilekic nmbclusters = 1024 + maxusers * 64; 110129906Sbmilekic TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters); 111129906Sbmilekic} 112129906SbmilekicSYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL); 113129906Sbmilekic 114151976Sandre/* XXX: These should be tuneables. Can't change UMA limits on the fly. */ 115157927Spsstatic int 116157927Spssysctl_nmbclusters(SYSCTL_HANDLER_ARGS) 117157927Sps{ 118157927Sps int error, newnmbclusters; 119157927Sps 120157927Sps newnmbclusters = nmbclusters; 121170289Sdwmalone error = sysctl_handle_int(oidp, &newnmbclusters, 0, req); 122157927Sps if (error == 0 && req->newptr) { 123157927Sps if (newnmbclusters > nmbclusters) { 124157927Sps nmbclusters = newnmbclusters; 125157927Sps uma_zone_set_max(zone_clust, nmbclusters); 126157927Sps EVENTHANDLER_INVOKE(nmbclusters_change); 127157927Sps } else 128157927Sps error = EINVAL; 129157927Sps } 130157927Sps return (error); 131157927Sps} 132157927SpsSYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW, 133157927Sps&nmbclusters, 0, sysctl_nmbclusters, "IU", 134129906Sbmilekic "Maximum number of mbuf clusters allowed"); 135155780SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbop, CTLFLAG_RW, &nmbjumbop, 0, 136155780Sandre "Maximum number of mbuf page size jumbo clusters allowed"); 137151976SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo9, CTLFLAG_RW, &nmbjumbo9, 0, 138151976Sandre "Maximum number of mbuf 9k jumbo clusters allowed"); 139151976SandreSYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo16, CTLFLAG_RW, &nmbjumbo16, 0, 140151976Sandre "Maximum number of mbuf 16k jumbo clusters allowed"); 141129906SbmilekicSYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat, 142129906Sbmilekic "Mbuf general information and statistics"); 143129906Sbmilekic 144129906Sbmilekic/* 145129906Sbmilekic * Zones from which we allocate. 
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
static void	mbuf_jumbo_free(void *, int, u_int8_t);

static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

/*
 * Initialize FreeBSD Network buffer allocation.
 *
 * Creates every mbuf-related UMA zone.  The INVARIANTS kernels plug the
 * UMA trash_* hooks into each zone so use-after-free of zone memory is
 * caught.  Note the ordering: zone_mbuf must exist before zone_pack is
 * created as its secondary zone, and the custom jumbo allocf/freef can
 * only be installed after the jumbo zones exist.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL)
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	/* Cap the cluster zone if an administrative limit is set. */
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);

	/* Packet zone is a secondary zone sharing the mbuf keg. */
	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too. Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo9 > 0)
		uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	/* 9k/16k buffers span pages: back them with contiguous memory. */
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo16 > 0)
		uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo16, mbuf_jumbo_free);

	/* Zero-initialized reference counts for external buffers. */
	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

	/* UMA_SLAB_PRIV routes the slab back to mbuf_jumbo_free(). */
	*flags = UMA_SLAB_PRIV;
	/* Any physical address, byte alignment, no boundary crossing limit. */
	return (contigmalloc(bytes, M_JUMBOFRAME, wait, (vm_paddr_t)0,
	    ~(vm_paddr_t)0, 1, 0));
}

/*
 * UMA backend page deallocator for the jumbo frame zones.
 */
static void
mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
{

	/* Counterpart of mbuf_jumbo_alloc(): release contiguous memory. */
	contigfree(mem, size, M_JUMBOFRAME);
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}

/*
 * The Mbuf master zone destructor.
 *
 * 'arg' carries per-free flags (cast from a pointer); MB_NOTAGS lets a
 * caller that already disposed of the tag chain skip the delete here.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	/* A plain mbuf must come back without external storage attached. */
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	/*
	 * The same constructor serves all cluster zones; the object size
	 * identifies which zone (and EXT_* type) this buffer belongs to.
	 */
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	/* Only a distinct case when the page size differs from MCLBYTES. */
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	/* Fresh cluster: exactly one reference, held by the caller. */
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		/* Attach the cluster as the mbuf's external storage. */
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
/*
 * The Mbuf Cluster zone destructor.
 *
 * Nothing to undo in production kernels; under INVARIANTS verify the
 * reference count is settled and poison the freed buffer.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))) );

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 *
 * Pairs a cluster with the mbuf for the lifetime of the cached object,
 * so packet allocations need not touch the cluster zone at all.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	/* Return the paired cluster set up by mb_zinit_pack(). */
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 *
 * Like mb_ctor_mbuf() but for the packet zone: the cluster is already
 * attached (m_ext set up by mb_zinit_pack()), so only the mbuf fields
 * are initialized here and m_data points into the cluster.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	/* Ask every protocol in every domain to release cached memory. */
	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}