--- sys/vm/uma_int.h    (r169431)
+++ sys/vm/uma_int.h    (r187681)
 /*-
- * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
[... 8 unchanged lines hidden ...]
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: head/sys/vm/uma_int.h 169431 2007-05-09 22:53:34Z rwatson $
+ * $FreeBSD: head/sys/vm/uma_int.h 187681 2009-01-25 09:11:24Z jeff $
  *
  */

 /*
  * This file includes definitions, structures, prototypes, and inlines that
  * should not be used outside of the actual implementation of UMA.
  */

[... 152 unchanged lines hidden ...]
  *
  */
 struct uma_keg {
         LIST_ENTRY(uma_keg)     uk_link;        /* List of all kegs */

         struct mtx      uk_lock;        /* Lock for the keg */
         struct uma_hash uk_hash;

+        char            *uk_name;       /* Name of creating zone. */
         LIST_HEAD(,uma_zone)    uk_zones;       /* Keg's zones */
         LIST_HEAD(,uma_slab)    uk_part_slab;   /* partially allocated slabs */
         LIST_HEAD(,uma_slab)    uk_free_slab;   /* empty slab list */
         LIST_HEAD(,uma_slab)    uk_full_slab;   /* full slabs */

         u_int32_t       uk_recurse;     /* Allocation recursion count */
         u_int32_t       uk_align;       /* Alignment mask */
         u_int32_t       uk_pages;       /* Total page count */

[... 11 unchanged lines hidden ...]

         vm_offset_t     uk_kva;         /* Base kva for zones with objs */
         uma_zone_t      uk_slabzone;    /* Slab zone backing us, if OFFPAGE */

         u_int16_t       uk_pgoff;       /* Offset to uma_slab struct */
         u_int16_t       uk_ppera;       /* pages per allocation from backend */
         u_int16_t       uk_ipers;       /* Items per slab */
         u_int32_t       uk_flags;       /* Internal flags */
 };
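
The keg's three slab lists encode its allocation policy: partially used slabs are consumed first so that entirely empty slabs stay reclaimable. A minimal sketch of that preference, assuming only the declarations above; the keg_pick_slab helper is hypothetical and not part of this diff:

    /* Hypothetical helper: choose a slab to carve the next item from. */
    static uma_slab_t
    keg_pick_slab(uma_keg_t keg)
    {
            uma_slab_t slab;

            /* Prefer a partially allocated slab; keeps empty slabs free. */
            if ((slab = LIST_FIRST(&keg->uk_part_slab)) != NULL)
                    return (slab);
            /* Otherwise fall back to a cached, completely empty slab. */
            return (LIST_FIRST(&keg->uk_free_slab));
    }
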
+typedef struct uma_keg * uma_keg_t;
 
-/* Simpler reference to uma_keg for internal use. */
-typedef struct uma_keg * uma_keg_t;
-
 /* Page management structure */

 /* Sorry for the union, but space efficiency is important */
 struct uma_slab_head {
         uma_keg_t       us_keg;         /* Keg we live in */
         union {
                 LIST_ENTRY(uma_slab)    _us_link;       /* slabs in zone */
                 unsigned long   _us_size;       /* Size of allocation */

[... 31 unchanged lines hidden ...]

 #define us_hlink        us_head.us_hlink
 #define us_data         us_head.us_data
 #define us_flags        us_head.us_flags
 #define us_freecount    us_head.us_freecount
 #define us_firstfree    us_head.us_firstfree

 typedef struct uma_slab * uma_slab_t;
 typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
+typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
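
The new uma_slaballoc typedef turns slab acquisition into a per-zone hook (stored as uz_slab further down). A hedged sketch of the call shape implied by the signature; the actual dispatch logic lives in uma_core.c and the wrapper function here is hypothetical:

    /* Illustrative only: fetch a slab through the zone's backend hook. */
    static uma_slab_t
    example_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
    {
            return (zone->uz_slab(zone, keg, flags));
    }
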
 
+
 /*
  * These give us the size of one free item reference within our corresponding
  * uma_slab structures, so that our calculations during zone setup are correct
  * regardless of what the compiler decides to do with padding the structure
  * arrays within uma_slab.
  */
 #define UMA_FRITM_SZ    (sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
 #define UMA_FRITMREF_SZ (sizeof(struct uma_slab_refcnt) - \
                             sizeof(struct uma_slab_head))

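In other words, each cached item costs its own size plus one free-list reference, measured after whatever padding the compiler inserts. A simplified sketch of the resulting items-per-slab arithmetic; UMA_SLAB_SIZE comes from the hidden part of this header, and the real keg setup code handles OFFPAGE, alignment, and refcnt cases this ignores:

    /* Illustrative only: rough items-per-slab for a non-OFFPAGE keg. */
    static int
    example_items_per_slab(size_t itemsize)
    {
            size_t avail = UMA_SLAB_SIZE - sizeof(struct uma_slab_head);

            return (avail / (itemsize + UMA_FRITM_SZ));
    }
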
+struct uma_klink {
+        LIST_ENTRY(uma_klink)   kl_link;
+        uma_keg_t               kl_keg;
+};
+typedef struct uma_klink *uma_klink_t;
+
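uma_klink is the glue record behind multi-keg zones (see UMA_ZFLAG_MULTI below): each link attaches one backing keg to a zone. A minimal sketch of walking a zone's keg list with the queue(3) macros, assuming the uz_kegs member added further down; the traversal function itself is illustrative, not code from this change:

    /* Illustrative only: visit every keg attached to a multi-keg zone. */
    static void
    example_foreach_keg(uma_zone_t zone, void (*fn)(uma_keg_t))
    {
            uma_klink_t klink;

            LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
                    fn(klink->kl_keg);
    }
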
 /*
  * Zone management structure
  *
  * TODO: Optimize for cache line size
  *
  */
 struct uma_zone {
         char            *uz_name;       /* Text name of the zone */
         struct mtx      *uz_lock;       /* Lock for the zone (keg's lock) */
-        uma_keg_t       uz_keg;         /* Our underlying Keg */
 
         LIST_ENTRY(uma_zone)    uz_link;        /* List of all zones in keg */
         LIST_HEAD(,uma_bucket)  uz_full_bucket; /* full buckets */
         LIST_HEAD(,uma_bucket)  uz_free_bucket; /* Buckets for frees */
 
+        LIST_HEAD(,uma_klink)   uz_kegs;        /* List of kegs. */
+        struct uma_klink        uz_klink;       /* klink for first keg. */
+
+        uma_slaballoc   uz_slab;        /* Allocate a slab from the backend. */
         uma_ctor        uz_ctor;        /* Constructor for each allocation */
         uma_dtor        uz_dtor;        /* Destructor */
         uma_init        uz_init;        /* Initializer for each item */
         uma_fini        uz_fini;        /* Discards memory */
 
         u_int64_t       uz_allocs;      /* Total number of allocations */
         u_int64_t       uz_frees;       /* Total number of frees */
         u_int64_t       uz_fails;       /* Total number of alloc failures */
+        u_int32_t       uz_flags;       /* Flags inherited from kegs */
+        u_int32_t       uz_size;        /* Size inherited from kegs */
         uint16_t        uz_fills;       /* Outstanding bucket fills */
         uint16_t        uz_count;       /* Highest value ub_ptr can have */
 
         /*
          * This HAS to be the last item because we adjust the zone size
          * based on NCPU and then allocate the space for the zones.
          */
         struct uma_cache        uz_cpu[1];      /* Per cpu caches */
 };
 
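uz_cpu[1] is the classic C trailing-array trick: the struct is allocated oversized so that one uma_cache per CPU sits behind the fixed members, which is why it must stay last. A hedged sketch of the size computation; the ncpus parameter and function are hypothetical, though UMA's startup code does the equivalent with the system CPU count:

    /* Illustrative only: uz_cpu[] already holds one cache; append the rest. */
    static size_t
    example_zone_alloc_size(int ncpus)
    {
            return (sizeof(struct uma_zone) +
                (ncpus - 1) * sizeof(struct uma_cache));
    }
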
 /*
  * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
  */
+#define UMA_ZFLAG_BUCKET        0x02000000      /* Bucket zone. */
+#define UMA_ZFLAG_MULTI         0x04000000      /* Multiple kegs in the zone. */
+#define UMA_ZFLAG_DRAINING      0x08000000      /* Running zone_drain. */
 #define UMA_ZFLAG_PRIVALLOC     0x10000000      /* Use uz_allocf. */
 #define UMA_ZFLAG_INTERNAL      0x20000000      /* No offpage no PCPU. */
 #define UMA_ZFLAG_FULL          0x40000000      /* Reached uz_maxpages */
 #define UMA_ZFLAG_CACHEONLY     0x80000000      /* Don't ask VM for buckets. */
 
+#define UMA_ZFLAG_INHERIT       (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
+                                    UMA_ZFLAG_BUCKET)
+
 #ifdef _KERNEL
 /* Internal prototypes */
 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
 void *uma_large_malloc(int size, int wait);
 void uma_large_free(uma_slab_t slab);
 
 /* Lock Macros */
 
-#define ZONE_LOCK_INIT(z, lc)                                   \
+#define KEG_LOCK_INIT(k, lc)                                    \
         do {                                                    \
                 if ((lc))                                       \
-                        mtx_init((z)->uz_lock, (z)->uz_name,    \
-                            (z)->uz_name, MTX_DEF | MTX_DUPOK); \
+                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
+                            (k)->uk_name, MTX_DEF | MTX_DUPOK); \
                 else                                            \
-                        mtx_init((z)->uz_lock, (z)->uz_name,    \
+                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
                             "UMA zone", MTX_DEF | MTX_DUPOK);   \
         } while (0)
 
-#define ZONE_LOCK_FINI(z)       mtx_destroy((z)->uz_lock)
+#define KEG_LOCK_FINI(k)        mtx_destroy(&(k)->uk_lock)
+#define KEG_LOCK(k)     mtx_lock(&(k)->uk_lock)
+#define KEG_UNLOCK(k)   mtx_unlock(&(k)->uk_lock)
 #define ZONE_LOCK(z)    mtx_lock((z)->uz_lock)
 #define ZONE_UNLOCK(z)  mtx_unlock((z)->uz_lock)
 
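With this rename the keg's mutex gets first-class lock/unlock macros instead of being reached through a zone. A usage sketch under the assumption that uk_lock protects the keg's slab lists; the function is hypothetical, and us_link is the slab link shorthand defined in the hidden portion of this header:

    /* Illustrative only: keg lock held while moving a slab between lists. */
    static void
    example_requeue_slab(uma_keg_t keg, uma_slab_t slab)
    {
            KEG_LOCK(keg);
            LIST_REMOVE(slab, us_link);
            LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
            KEG_UNLOCK(keg);
    }
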
 /*
  * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
  * the slab structure.
  *
  * Arguments:
[... 66 unchanged lines hidden ...]
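
The body of hash_sfind sits in the hidden lines, but the comment and the prototype above pin down its job: map an item's address back to its slab header for OFFPAGE zones. A heavily hedged reconstruction of the idea only; UMA_HASH and uh_slab_hash are assumptions about the hidden declarations, while us_hlink and us_data are the shorthands defined earlier:

    /* Illustrative reconstruction only, not the hidden hash_sfind body. */
    static __inline uma_slab_t
    example_hash_sfind(struct uma_hash *hash, u_int8_t *data)
    {
            uma_slab_t slab;
            int hval = UMA_HASH(hash, data);        /* assumed hash macro */

            SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink)
                    if ((u_int8_t *)slab->us_data == data)
                            return (slab);
            return (NULL);
    }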