/*-
 * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following

--- 8 unchanged lines hidden ---

 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/vm/uma_int.h 169431 2007-05-09 22:53:34Z rwatson $
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */


--- 152 unchanged lines hidden ---

 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */

--- 11 unchanged lines hidden ---

	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int32_t	uk_flags;	/* Internal flags */
};

/* Simpler reference to uma_keg for internal use. */
typedef struct uma_keg * uma_keg_t;

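/*
 * Illustrative sketch (not part of the original header): the counters above
 * already determine how many items a keg can hold.  uk_pages / uk_ppera is
 * the number of slabs currently backed by pages, and each slab carries
 * uk_ipers items.  The helper name below is hypothetical; it only shows how
 * the fields relate.
 */
static __inline u_int32_t
keg_item_capacity(uma_keg_t keg)
{

	/* pages / pages-per-slab = slabs; slabs * items-per-slab = items. */
	return ((keg->uk_pages / keg->uk_ppera) * keg->uk_ipers);
}
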
/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */

--- 31 unchanged lines hidden ---

#define	us_hlink	us_head.us_hlink
#define	us_data		us_head.us_data
#define	us_flags	us_head.us_flags
#define	us_freecount	us_head.us_freecount
#define	us_firstfree	us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;

/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define	UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define	UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) - \
    sizeof(struct uma_slab_head))

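/*
 * Illustrative sketch (not part of the original header): the sizes above are
 * what zone setup divides by when it works out how many items fit in a slab.
 * The real calculation lives in uma_core.c and also handles OFFPAGE and
 * refcnt kegs; this hypothetical helper only shows the basic case, assuming
 * UMA_SLAB_SIZE (defined earlier in this file) is the slab's backing size.
 */
static __inline u_int16_t
keg_items_per_slab(size_t item_size)
{

	/* Space left after the embedded slab header, per item + free ref. */
	return ((UMA_SLAB_SIZE - sizeof(struct uma_slab_head)) /
	    (item_size + UMA_FRITM_SZ));
}
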
/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */
	uma_keg_t	uz_keg;		/* Our underlying Keg */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int64_t	uz_allocs;	/* Total number of allocations */
	u_int64_t	uz_frees;	/* Total number of frees */
	u_int64_t	uz_fails;	/* Total number of alloc failures */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_ptr can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

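/*
 * Illustrative sketch (not part of the original header): the comment above
 * refers to how zone structures are sized.  Only one uma_cache is declared
 * in the struct; space for the remaining CPUs is appended when the zone is
 * allocated, roughly as below.  The helper and its "ncpus" argument are
 * hypothetical; the real sizing is done in uma_core.c.
 */
static __inline size_t
zone_struct_size(int ncpus)
{

	/* One cache is embedded in the struct; append the other CPUs'. */
	return (sizeof(struct uma_zone) +
	    (ncpus - 1) * sizeof(struct uma_cache));
}
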
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_PRIVALLOC	0x10000000	/* Use uz_allocf. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK_FINI(z)	mtx_destroy((z)->uz_lock)
#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)

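/*
 * Illustrative sketch (not part of the original header): typical use of the
 * macros above.  Zone state such as the statistics counters is only touched
 * with the zone lock held; the helper name is hypothetical and only shows
 * the lock/unlock pattern used throughout uma_core.c.
 */
static __inline u_int64_t
zone_read_allocs(uma_zone_t zone)
{
	u_int64_t allocs;

	ZONE_LOCK(zone);		/* acquires zone->uz_lock */
	allocs = zone->uz_allocs;	/* zone-level count; per-CPU uc_allocs not folded in */
	ZONE_UNLOCK(zone);
	return (allocs);
}
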
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to
 * look up the slab structure.
 *
 * Arguments:

--- 66 unchanged lines hidden ---