/*-
 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/vm/uma_int.h 187681 2009-01-25 09:11:24Z jeff $
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes,
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indices, which are 8 bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16 bit
 * values.  Currently on alpha you can get 250 or so 32 byte items and on x86
 * you can get 250 or so 16 byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * Another potential space optimization is to store the 8 bits of linkage in
 * the space wasted between items due to alignment problems.  This may yield a
 * much better memory footprint for certain sizes of objects.  Another
 * alternative is to increase the UMA_SLAB_SIZE, or allow for dynamic slab
 * sizes.  I prefer dynamic slab sizes because we could stick with 8 bit
 * indices and only use large slab sizes for zones with a lot of waste per
 * slab.  This may create inefficiencies in the vm subsystem due to
 * fragmentation in the address space.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
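
/*
 * Hedged consumer-side illustration of the Keg/Zone split described above,
 * using the public API from uma.h; the zone name "foo" and struct foo are
 * hypothetical.  uma_zcreate() sets up the Zone and its backing Keg, and
 * allocations are satisfied from the per-CPU and Bucket caches before the
 * Keg's slabs are ever touched:
 *
 *	uma_zone_t foo_zone;
 *	struct foo *item;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, item);
 *	uma_zdestroy(foo_zone);
 */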

/*
 * This is the representation for normal (non-OFFPAGE) slabs:
 *
 * i == item
 * s == slab pointer
 *
 *     <----------------  Page (UMA_SLAB_SIZE) ------------------>
 *     ___________________________________________________________
 *    | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *    ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *    ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *    |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *     ___________________________________________________________
 *    | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 *    ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 *    ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 *    |___________________________________________________________|
 *     ___________    ^
 *    |slab header|   |
 *    |___________|---*
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES	48		/* Pages allocated for startup */

/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)

/*
 * I doubt there will be many cases where this is exceeded.  This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */
#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};
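
/*
 * Worked example of UMA_HASH(), assuming 4 KB pages (UMA_SLAB_SHIFT == 12)
 * and a table still at its initial size (uh_hashmask == 31); the address is
 * hypothetical:
 *
 *	data == (u_int8_t *)0xc2345000
 *	UMA_HASH(h, data) == (0xc2345000 >> 12) & 31 == 0xc2345 & 31 == 5
 *
 * so contiguous pages land in consecutive buckets.
 */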

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
	u_int64_t	uc_frees;	/* Count of frees */
};

typedef struct uma_cache * uma_cache_t;
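
/*
 * Hedged illustration of how these caches are consumed: the allocation fast
 * path in uma_core.c pins itself to a CPU with a critical section and pops
 * an item off uc_allocbucket without taking any lock.  Roughly (error
 * handling and bucket exchange with the Zone omitted):
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		bucket->ub_cnt--;
 *		item = bucket->ub_bucket[bucket->ub_cnt];
 *		cache->uc_allocs++;
 *	}
 *	critical_exit();
 */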

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	char		*uk_name;	/* Name of creating zone. */
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */
	u_int32_t	uk_free;	/* Count of items free in slabs */
	u_int32_t	uk_size;	/* Requested size of each item */
	u_int32_t	uk_rsize;	/* Real size of each item */
	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	struct vm_object	*uk_obj;	/* Zone specific object */
	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int32_t	uk_flags;	/* Internal flags */
};
typedef struct uma_keg * uma_keg_t;

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
};

/* The standard slab structure */
struct uma_slab {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
	} us_freelist[1];			/* actual number bigger */
};

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items we maintain
 * reference counters for in the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
		u_int32_t	us_refcnt;
	} us_freelist[1];			/* actual number bigger */
};

#define us_keg		us_head.us_keg
#define us_link		us_head.us_type._us_link
#define us_size		us_head.us_type._us_size
#define us_hlink	us_head.us_hlink
#define us_data		us_head.us_data
#define us_flags	us_head.us_flags
#define us_freecount	us_head.us_freecount
#define us_firstfree	us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
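
/*
 * Hedged sketch of how the embedded free list is consumed when an item is
 * carved out of a slab (compare slab_alloc_item() in uma_core.c);
 * us_firstfree heads a linked list of 8 bit indices threaded through
 * us_freelist[]:
 *
 *	freei = slab->us_firstfree;
 *	slab->us_firstfree = slab->us_freelist[freei].us_item;
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	slab->us_freecount--;
 */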

typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
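
/*
 * A Zone's uz_slab hook (declared below in struct uma_zone) has this type;
 * single-keg and multi-keg zones install different backends behind it.
 * Illustrative call, with names assumed from uma_core.c conventions:
 *
 *	slab = zone->uz_slab(zone, keg, flags);
 */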

/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) - \
    sizeof(struct uma_slab_head))
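
/*
 * Worked example, under the assumption of 4 KB pages and a hypothetical
 * 256 byte (already aligned) item: keg setup in uma_core.c sizes a
 * non-OFFPAGE slab along the lines of
 *
 *	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
 *	    (rsize + UMA_FRITM_SZ);
 *
 * which charges every item for its one-byte free list index, giving
 * roughly (4096 - header) / 257, i.e. 15 items per slab here.
 */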

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t	kl_keg;
};
typedef struct uma_klink *uma_klink_t;
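
/*
 * Hedged sketch: a multi-keg Zone chains one uma_klink per backing Keg off
 * uz_kegs (uz_klink below embeds the link for the first Keg), so keg-wide
 * operations walk the list along these lines:
 *
 *	uma_klink_t klink;
 *
 *	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
 *		work(klink->kl_keg);
 *
 * where work() stands in for whatever per-keg operation is being applied.
 */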

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int64_t	uz_allocs;	/* Total number of allocations */
	u_int64_t	uz_frees;	/* Total number of frees */
	u_int64_t	uz_fails;	/* Total number of alloc failures */
	u_int32_t	uz_flags;	/* Flags inherited from kegs */
	u_int32_t	uz_size;	/* Size inherited from kegs */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_ptr can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};
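
/*
 * Hedged arithmetic sketch of why uz_cpu must come last: since uz_cpu is
 * declared with one element, the startup code can allocate each zone as a
 * single variable-length object, along the lines of
 *
 *	zsize = sizeof(struct uma_zone) +
 *	    mp_maxid * sizeof(struct uma_cache);
 *
 * so CPU ids 0 through mp_maxid all get a cache slot.
 */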

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_BUCKET	0x02000000	/* Bucket zone. */
#define UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define UMA_ZFLAG_PRIVALLOC	0x10000000	/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT	(UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
    UMA_ZFLAG_BUCKET)

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)
#define ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}
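
/*
 * Hedged usage sketch: free-side code recovers the slab for an OFFPAGE item
 * by masking the item address down to its page and probing the keg's hash
 * ("item" and "keg" are hypothetical locals):
 *
 *	u_int8_t *mem;
 *	uma_slab_t slab;
 *
 *	mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
 *	slab = hash_sfind(&keg->uk_hash, mem);
 */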

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */