--- uma_int.h	(124649)
+++ uma_int.h	(129906)
/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
--- 9 unchanged lines hidden (view full) ---
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/vm/uma_int.h 124649 2004-01-18 05:51:06Z alc $
+ * $FreeBSD: head/sys/vm/uma_int.h 129906 2004-05-31 21:46:06Z bmilekic $
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
- * Zones contain lists of slabs which are stored in either the full bin, empty
+ * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes
- * and rsize is the result of that.  The zone also stores information for
+ * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indexes, which are 8 bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16bit
 * values.  Currently on alpha you can get 250 or so 32 byte items and on x86
--- 12 unchanged lines hidden (view full) ---
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
+ * Kegs may serve multiple Zones but by far most of the time they only serve
+ * one.  When a Zone is created, a Keg is allocated and setup for it.  While
+ * the backing Keg stores slabs, the Zone caches Buckets of items allocated
+ * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
+ * pair, as well as with its own set of small per-CPU caches, layered above
+ * the Zone's general Bucket cache.
+ *
+ * The PCPU caches are protected by their own locks, while the Zones backed
+ * by the same Keg all share a common Keg lock (to coalesce contention on
+ * the backing slabs).  The backing Keg typically only serves one Zone but
+ * in the case of multiple Zones, one of the Zones is considered the
+ * Master Zone and all Zone-related stats from the Keg are done in the
+ * Master Zone.  For an example of a Multi-Zone setup, refer to the
+ * Mbuf allocation code.
 */

/*
 * This is the representation for normal (Non OFFPAGE slab)
 *
 * i == item
 * s == slab pointer
 *
--- 51 unchanged lines hidden (view full) ---

#define UMA_HASH_INSERT(h, s, mem)				\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
	    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)				\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), uma_slab, us_hlink);

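As a rough illustration of the reverse mapping these macros maintain: when a
slab header is not embedded in its pages (OFFPAGE), a freed address is hashed
back to its slab through uh_slab_hash.  The helper below is a hypothetical
sketch of that lookup, written only against the macros and fields visible
here; the real lookup code lives in uma_core.c.

	/*
	 * Sketch: find the slab managing the page at "data" by walking
	 * the SLIST bucket that the address hashes into.
	 */
	static struct uma_slab *
	hash_sfind_sketch(struct uma_hash *hash, u_int8_t *data)
	{
		struct uma_slab *slab;
		int hval;

		hval = UMA_HASH(hash, data);
		SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
			if (slab->us_data == data)	/* first item == page start */
				return (slab);
		}
		return (NULL);
	}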
-/* Page management structure */
-
-/* Sorry for the union, but space efficiency is important */
-struct uma_slab {
-	uma_zone_t	us_zone;		/* Zone we live in */
-	union {
-		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
-		unsigned long	_us_size;	/* Size of allocation */
-	} us_type;
-	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
-	u_int8_t	*us_data;		/* First item */
-	u_int8_t	us_flags;		/* Page flags see uma.h */
-	u_int8_t	us_freecount;		/* How many are free? */
-	u_int8_t	us_firstfree;		/* First free item index */
-	u_int8_t	us_freelist[1];		/* Free List (actually larger) */
-};
-
-#define us_link	us_type._us_link
-#define us_size	us_type._us_size
-
-typedef struct uma_slab * uma_slab_t;
-
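The 8-bit free list described in the header comment is an intrusive singly
linked list over item indexes: us_firstfree names the first free slot, and
us_freelist[i] holds the index of the free slot after i.  The function below
is a minimal sketch of one allocation step against this (old) layout, not
the kernel's actual code; uz_rsize is the zone's real item size.  In the new
revision the same walk goes through us_freelist[i].us_item and the keg's
uk_rsize instead.

	/* Sketch: carve one item out of a slab via the index chain. */
	static void *
	slab_alloc_item_sketch(uma_zone_t zone, uma_slab_t slab)
	{
		u_int8_t freei;

		freei = slab->us_firstfree;		/* head of the chain */
		slab->us_firstfree = slab->us_freelist[freei]; /* unlink it */
		slab->us_freecount--;
		/* Convert the item index back into an address. */
		return (slab->us_data + (zone->uz_rsize * freei));
	}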
/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
--- 16 unchanged lines hidden (view full) ---
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;

/*
+ * Keg management structure
+ *
+ * TODO: Optimize for cache line size
+ *
+ */
+struct uma_keg {
+	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
+
+	struct mtx	uk_lock;	/* Lock for the keg */
+	struct uma_hash	uk_hash;
+
+	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
+	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
+	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
+	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */
+
+	u_int32_t	uk_recurse;	/* Allocation recursion count */
+	u_int32_t	uk_align;	/* Alignment mask */
+	u_int32_t	uk_pages;	/* Total page count */
+	u_int32_t	uk_free;	/* Count of items free in slabs */
+	u_int32_t	uk_size;	/* Requested size of each item */
+	u_int32_t	uk_rsize;	/* Real size of each item */
+	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */
+
+	uma_init	uk_init;	/* Keg's init routine */
+	uma_fini	uk_fini;	/* Keg's fini routine */
+	uma_alloc	uk_allocf;	/* Allocation function */
+	uma_free	uk_freef;	/* Free routine */
+
+	struct vm_object	*uk_obj;	/* Zone specific object */
+	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
+	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */
+
+	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
+	u_int16_t	uk_ppera;	/* pages per allocation from backend */
+	u_int16_t	uk_ipers;	/* Items per slab */
+	u_int16_t	uk_flags;	/* Internal flags */
+};
+
+/* Simpler reference to uma_keg for internal use. */
+typedef struct uma_keg * uma_keg_t;
+
+/* Page management structure */
+
+/* Sorry for the union, but space efficiency is important */
+struct uma_slab_head {
+	uma_keg_t	us_keg;			/* Keg we live in */
+	union {
+		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
+		unsigned long	_us_size;	/* Size of allocation */
+	} us_type;
+	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
+	u_int8_t	*us_data;		/* First item */
+	u_int8_t	us_flags;		/* Page flags see uma.h */
+	u_int8_t	us_freecount;		/* How many are free? */
+	u_int8_t	us_firstfree;		/* First free item index */
+};
+
+/* The standard slab structure */
+struct uma_slab {
+	struct uma_slab_head	us_head;	/* slab header data */
+	struct {
+		u_int8_t	us_item;
+	} us_freelist[1];			/* actual number bigger */
+};
+
+/*
+ * The slab structure for UMA_ZONE_REFCNT zones for whose items we
+ * maintain reference counters in the slab for.
+ */
+struct uma_slab_refcnt {
+	struct uma_slab_head	us_head;	/* slab header data */
+	struct {
+		u_int8_t	us_item;
+		u_int32_t	us_refcnt;
+	} us_freelist[1];			/* actual number bigger */
+};
+
+#define	us_keg		us_head.us_keg
+#define	us_link		us_head.us_type._us_link
+#define	us_size		us_head.us_type._us_size
+#define	us_hlink	us_head.us_hlink
+#define	us_data		us_head.us_data
+#define	us_flags	us_head.us_flags
+#define	us_freecount	us_head.us_freecount
+#define	us_firstfree	us_head.us_firstfree
+
+typedef struct uma_slab * uma_slab_t;
+typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
+
+/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
-	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones */
-	u_int32_t	uz_align;	/* Alignment mask */
-	u_int32_t	uz_pages;	/* Total page count */
+	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */
+	uma_keg_t	uz_keg;		/* Our underlying Keg */

-/* Used during alloc / free */
-	struct mtx	uz_lock;	/* Lock for the zone */
-	u_int32_t	uz_free;	/* Count of items free in slabs */
-	u_int16_t	uz_ipers;	/* Items per slab */
-	u_int16_t	uz_flags;	/* Internal flags */
-
-	LIST_HEAD(,uma_slab)	uz_part_slab;	/* partially allocated slabs */
-	LIST_HEAD(,uma_slab)	uz_free_slab;	/* empty slab list */
-	LIST_HEAD(,uma_slab)	uz_full_slab;	/* full slabs */
+	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */
-	u_int32_t	uz_size;	/* Requested size of each item */
-	u_int32_t	uz_rsize;	/* Real size of each item */

-	struct uma_hash	uz_hash;
-	u_int16_t	uz_pgoff;	/* Offset to uma_slab struct */
-	u_int16_t	uz_ppera;	/* pages per allocation from backend */
-
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
-	u_int64_t	uz_allocs;	/* Total number of allocations */
-
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */
-	uma_alloc	uz_allocf;	/* Allocation function */
-	uma_free	uz_freef;	/* Free routine */
-	struct vm_object	*uz_obj;	/* Zone specific object */
-	vm_offset_t	uz_kva;		/* Base kva for zones with objs */
-	u_int32_t	uz_maxpages;	/* Maximum number of pages to alloc */
-	int		uz_recurse;	/* Allocation recursion count */
+
+	u_int64_t	uz_allocs;	/* Total number of allocations */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_ptr can have */
+
	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

/*
--- 9 unchanged lines hidden (view full) ---
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
-			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
+			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
-			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
+			mtx_init((z)->uz_lock, (z)->uz_name,	\
263 "UMA zone", MTX_DEF | MTX_DUPOK); \ 264 } while (0) 265 | 326 "UMA zone", MTX_DEF | MTX_DUPOK); \ 327 } while (0) 328 |
-#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
-#define	ZONE_LOCK(z)		mtx_lock(&(z)->uz_lock)
-#define	ZONE_UNLOCK(z)		mtx_unlock(&(z)->uz_lock)
+#define	ZONE_LOCK_FINI(z)	mtx_destroy((z)->uz_lock)
+#define	ZONE_LOCK(z)		mtx_lock((z)->uz_lock)
+#define	ZONE_UNLOCK(z)		mtx_unlock((z)->uz_lock)
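The switch from &(z)->uz_lock to (z)->uz_lock follows from uz_lock now being
a pointer to the keg's mutex, so every zone on one keg shares that lock.  A
minimal usage sketch under the new layout (the function itself is
illustrative, not part of the file):

	/* Sketch: read a keg statistic under the shared zone/keg lock. */
	static u_int32_t
	zone_free_count_sketch(uma_zone_t zone)
	{
		u_int32_t nfree;

		ZONE_LOCK(zone);		/* locks zone->uz_keg's mutex */
		nfree = zone->uz_keg->uk_free;	/* items free in backing slabs */
		ZONE_UNLOCK(zone);
		return (nfree);
	}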

#define	CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define	CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

--- 73 unchanged lines hidden ---
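The per-CPU caches sit above the keg, so a cache is touched under its CPU
lock rather than the zone lock.  A hedged sketch of that pattern; it calls
mtx_unlock() directly because the matching CPU_UNLOCK() macro falls in the
hidden lines, and the drain logic itself is elided:

	/* Sketch: access one CPU's bucket cache under its own lock. */
	static void
	cache_touch_sketch(uma_zone_t zone, int cpu)
	{
		uma_cache_t cache;

		CPU_LOCK(cpu);
		cache = &zone->uz_cpu[cpu];
		/* ... work with cache->uc_allocbucket / uc_freebucket ... */
		mtx_unlock(&uma_pcpu_mtx[(cpu)]);
	}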