29c29
< __FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 150644 2005-09-27 20:28:43Z rwatson $");
---
> __FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 151976 2005-11-02 16:20:36Z andre $");
80a81,94
> *
> *
> * Whenever an object is allocated with uma_zalloc() out of
> * one of the Zones its _ctor_ function is executed. Likewise,
> * for any deallocation through uma_zfree() the _dtor_ function
> * is executed.
> *
> * Caches are per-CPU and are filled from the Master Zone.
> *
> * Whenever an object is allocated from the underlying global
> * memory pool it gets pre-initialized with the _zinit_ functions.
> * When the Kegs are overfull, objects get decommissioned with
> * _zfini_ functions and are freed back to the global memory pool.
> *
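
To make the hook order above concrete, here is a minimal, hypothetical UMA zone; the my_* names and the 256-byte item size are illustrative only, but the uma_zcreate()/uma_zalloc()/uma_zfree() calls and the hook signatures match the ones used in this file:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/uma.h>

/* Runs once, when an item is first backed by the global memory pool. */
static int
my_zinit(void *mem, int size, int how)
{

	bzero(mem, size);
	return (0);
}

/* Runs when an item is decommissioned back to the global memory pool. */
static void
my_zfini(void *mem, int size)
{
}

/* Runs on every uma_zalloc() that hands an item out of the caches. */
static int
my_ctor(void *mem, int size, void *arg, int how)
{

	return (0);
}

/* Runs on every uma_zfree() that puts an item back into the caches. */
static void
my_dtor(void *mem, int size, void *arg)
{
}

static uma_zone_t my_zone;

static void
my_zone_setup(void)
{
	void *item;

	my_zone = uma_zcreate("my_zone", 256,
	    my_ctor, my_dtor, my_zinit, my_zfini,
	    UMA_ALIGN_PTR, 0);

	/* my_ctor() runs here ... */
	item = uma_zalloc(my_zone, M_WAITOK);
	/* ... my_dtor() runs here; zinit/zfini run only at the pool edges. */
	uma_zfree(my_zone, item);
}
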
83c97,99
< int nmbclusters;
---
> int nmbclusters; /* limits number of mbuf clusters */
> int nmbjumbo9; /* limits number of 9k jumbo clusters */
> int nmbjumbo16; /* limits number of 16k jumbo clusters */
96a113
> /* XXX: These should be tuneables. Can't change UMA limits on the fly. */
98a116,119
> SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo9, CTLFLAG_RW, &nmbjumbo9, 0,
> "Maximum number of mbuf 9k jumbo clusters allowed");
> SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo16, CTLFLAG_RW, &nmbjumbo16, 0,
> "Maximum number of mbuf 16k jumbo clusters allowed");
107a129,131
> uma_zone_t zone_jumbo9;
> uma_zone_t zone_jumbo16;
> uma_zone_t zone_ext_refcnt;
116,119c140,143
< static void mb_dtor_clust(void *, int, void *); /* XXX */
< static void mb_dtor_pack(void *, int, void *); /* XXX */
< static int mb_init_pack(void *, int, int);
< static void mb_fini_pack(void *, int);
---
> static void mb_dtor_clust(void *, int, void *);
> static void mb_dtor_pack(void *, int, void *);
> static int mb_zinit_pack(void *, int, int);
> static void mb_zfini_pack(void *, int);
138,139c162,163
< zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE, mb_ctor_mbuf,
< mb_dtor_mbuf,
---
> zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
> mb_ctor_mbuf, mb_dtor_mbuf,
141c165
< trash_init, trash_fini, MSIZE - 1, UMA_ZONE_MAXBUCKET);
---
> trash_init, trash_fini,
143c167
< NULL, NULL, MSIZE - 1, UMA_ZONE_MAXBUCKET);
---
> NULL, NULL,
144a169,170
> MSIZE - 1, UMA_ZONE_MAXBUCKET);
>
146c172
< mb_ctor_clust,
---
> mb_ctor_clust, mb_dtor_clust,
148c174
< mb_dtor_clust, trash_init, trash_fini, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
---
> trash_init, trash_fini,
150c176
< mb_dtor_clust, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
---
> NULL, NULL,
151a178
> UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
153a181
>
155c183
< mb_dtor_pack, mb_init_pack, mb_fini_pack, zone_mbuf);
---
> mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
157c185,195
< /* uma_prealloc() goes here */
---
> /* Make jumbo frame zones too. 9k and 16k. */
> zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
> mb_ctor_clust, mb_dtor_clust,
> #ifdef INVARIANTS
> trash_init, trash_fini,
> #else
> NULL, NULL,
> #endif
> UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
> if (nmbjumbo9 > 0)
> uma_zone_set_max(zone_jumbo9, nmbjumbo9);
158a197,214
> zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
> mb_ctor_clust, mb_dtor_clust,
> #ifdef INVARIANTS
> trash_init, trash_fini,
> #else
> NULL, NULL,
> #endif
> UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
> if (nmbjumbo16 > 0)
> uma_zone_set_max(zone_jumbo16, nmbjumbo16);
>
> zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
> NULL, NULL,
> NULL, NULL,
> UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
>
> /* uma_prealloc() goes here... */
>
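
For illustration, a hedged sketch of how the new jumbo zones could be consumed; my_attach_jumbo9() is a made-up helper (not part of this change) and assumes zone_jumbo9 is visible to the caller, e.g. via mbuf.h. It relies only on uma_zalloc_arg() passing the mbuf through to mb_ctor_clust(), which attaches the buffer and sets M_EXT with ext_type EXT_JUMBO9:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <vm/uma.h>

/* Hypothetical: back an existing mbuf with a 9k jumbo cluster. */
static int
my_attach_jumbo9(struct mbuf *m, int how)
{

	if (uma_zalloc_arg(zone_jumbo9, m, how) == NULL)
		return (ENOBUFS);
	/* mb_ctor_clust() has set up m->m_ext; m_data points at the buffer. */
	return (0);
}
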
192c248
< * mbuf allocation API.
---
> * mbuf allocation API. See mbuf.h.
213c269,275
< m->m_type = type;
---
> /*
> * The mbuf is initialized later. The caller has the
> * responsibility to set up any MAC labels too.
> */
> if (type == MT_NOINIT)
> return (0);
>
215a278
> m->m_len = 0;
216a280
> m->m_type = type;
219a284,285
> m->m_pkthdr.len = 0;
> m->m_pkthdr.header = NULL;
220a287
> m->m_pkthdr.csum_data = 0;
234c301
< * The Mbuf master zone and Packet secondary zone destructor.
---
> * The Mbuf master zone destructor.
243a311
> KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
249c317,319
< /* XXX Only because of stats */
---
> /*
> * The Mbuf Packet zone destructor.
> */
257a328,336
>
> /* Make sure we've got a clean cluster back. */
> KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
> KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
> KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
> KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__));
> KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
> KASSERT(m->m_ext.ext_type == EXT_CLUSTER, ("%s: ext_type != EXT_CLUSTER", __func__));
> KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
264c343
< * The Cluster zone constructor.
---
> * The Cluster and Jumbo[9|16] zone constructor.
267c346,348
< * are configuring cluster storage for.
---
> * are configuring cluster storage for. If 'arg' is
> * NULL we allocate just the cluster without attaching
> * it to an mbuf. See mbuf.h.
272a354,355
> u_int *refcnt;
> int type = 0;
278,285c361,388
< m->m_ext.ext_buf = (caddr_t)mem;
< m->m_data = m->m_ext.ext_buf;
< m->m_flags |= M_EXT;
< m->m_ext.ext_free = NULL;
< m->m_ext.ext_args = NULL;
< m->m_ext.ext_size = MCLBYTES;
< m->m_ext.ext_type = EXT_CLUSTER;
< m->m_ext.ref_cnt = NULL; /* Lazy counter assign. */
---
> if (m != NULL) {
> switch (size) {
> case MCLBYTES:
> type = EXT_CLUSTER;
> break;
> case MJUM9BYTES:
> type = EXT_JUMBO9;
> break;
> case MJUM16BYTES:
> type = EXT_JUMBO16;
> break;
> default:
> panic("unknown cluster size");
> break;
> }
> m->m_ext.ext_buf = (caddr_t)mem;
> m->m_data = m->m_ext.ext_buf;
> m->m_flags |= M_EXT;
> m->m_ext.ext_free = NULL;
> m->m_ext.ext_args = NULL;
> m->m_ext.ext_size = size;
> m->m_ext.ext_type = type;
> m->m_ext.ref_cnt = uma_find_refcnt(zone_clust, mem);
> *m->m_ext.ref_cnt = 1;
> } else {
> refcnt = uma_find_refcnt(zone_clust, mem);
> *refcnt = 1;
> }
289c392,394
< /* XXX */
---
> /*
> * The Mbuf Cluster zone destructor.
> */
292a398,402
> u_int *refcnt;
>
> refcnt = uma_find_refcnt(zone_clust, mem);
> KASSERT(*refcnt == 1, ("%s: refcnt incorrect %u", __func__, *refcnt));
> *refcnt = 0;
300c410
< * object's transition from keg slab to zone cache.
---
> * object's transition from mbuf keg slab to zone cache.
303c413
< mb_init_pack(void *mem, int size, int how)
---
> mb_zinit_pack(void *mem, int size, int how)
307,308c417
< m = (struct mbuf *)mem;
< m->m_ext.ext_buf = NULL;
---
> m = (struct mbuf *)mem; /* m is virgin. */
323c432
< mb_fini_pack(void *mem, int size)
---
> mb_zfini_pack(void *mem, int size)
332d440
< m->m_ext.ext_buf = NULL;
360d467
< m->m_type = type;
364,369c471,473
< m->m_flags = flags|M_EXT;
< m->m_ext.ext_free = NULL;
< m->m_ext.ext_args = NULL;
< m->m_ext.ext_size = MCLBYTES;
< m->m_ext.ext_type = EXT_PACKET;
< m->m_ext.ref_cnt = NULL; /* Lazy counter assign. */
---
> m->m_len = 0;
> m->m_flags = (flags | M_EXT);
> m->m_type = type;
372a477,478
> m->m_pkthdr.len = 0;
> m->m_pkthdr.header = NULL;
373a480
> m->m_pkthdr.csum_data = 0;
381a489,490
> /* m_ext is already initialized. */
>
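
Since the packet zone ctor above deliberately leaves m_ext alone, a cached mbuf+cluster pair comes back ready to use. A small, hypothetical consumer (my_grab_packet() is an illustrative name; m_getcl() is the existing mbuf.h interface that allocates from the packet zone):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/* Illustrative only: get an mbuf with a 2k cluster already attached. */
static struct mbuf *
my_grab_packet(void)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	/* m_data points into the MCLBYTES cluster kept in the cache. */
	return (m);
}
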