sys/vm/uma_int.h: r251709 → r251826 (lines marked '-' were deleted, '+' were added; unmarked lines are unchanged context)
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
-27 * $FreeBSD: head/sys/vm/uma_int.h 251709 2013-06-13 21:05:38Z jeff $
+27 * $FreeBSD: head/sys/vm/uma_int.h 251826 2013-06-17 03:43:47Z jeff $
28 *
29 */
30
31/*
32 * This file includes definitions, structures, prototypes, and inlines that
33 * should not be used outside of the actual implementation of UMA.
34 */
35
36/*
37 * Here's a quick description of the relationship between the objects:
38 *
39 * Kegs contain lists of slabs which are stored in either the full bin, empty
40 * bin, or partially allocated bin, to reduce fragmentation. They also contain
 41 * the user-supplied item size, which is adjusted for alignment; rsize is
 42 * the result of that adjustment. The Keg also stores information for
43 * managing a hash of page addresses that maps pages to uma_slab_t structures
44 * for pages that don't have embedded uma_slab_t's.
45 *
46 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
47 * be allocated off the page from a special slab zone. The free list within a
48 * slab is managed with a bitmask. For item sizes that would yield more than
 49 * 10% memory waste, we potentially allocate a separate uma_slab_t if doing
 50 * so improves the number of items that fit per slab.
51 *
 52 * Another potential space optimization is to store the 8 bits of linkage in
 53 * the space wasted between items due to alignment. This may yield a much better
54 * memory footprint for certain sizes of objects. Another alternative is to
55 * increase the UMA_SLAB_SIZE, or allow for dynamic slab sizes. I prefer
56 * dynamic slab sizes because we could stick with 8 bit indices and only use
57 * large slab sizes for zones with a lot of waste per slab. This may create
58 * inefficiencies in the vm subsystem due to fragmentation in the address space.
59 *
 60 * The only really gross cases, with regard to memory waste, are for those
61 * items that are just over half the page size. You can get nearly 50% waste,
62 * so you fall back to the memory footprint of the power of two allocator. I
63 * have looked at memory allocation sizes on many of the machines available to
64 * me, and there does not seem to be an abundance of allocations at this range
65 * so at this time it may not make sense to optimize for it. This can, of
66 * course, be solved with dynamic slab sizes.
67 *
68 * Kegs may serve multiple Zones but by far most of the time they only serve
 69 * one. When a Zone is created, a Keg is allocated and set up for it. While
70 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
71 * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor
72 * pair, as well as with its own set of small per-CPU caches, layered above
73 * the Zone's general Bucket cache.
74 *
75 * The PCPU caches are protected by critical sections, and may be accessed
76 * safely only from their associated CPU, while the Zones backed by the same
77 * Keg all share a common Keg lock (to coalesce contention on the backing
78 * slabs). The backing Keg typically only serves one Zone but in the case of
79 * multiple Zones, one of the Zones is considered the Master Zone and all
80 * Zone-related stats from the Keg are done in the Master Zone. For an
81 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
82 */
83
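/*
 * For illustration, a minimal sketch of how a consumer drives this machinery
 * through the public uma.h interface (the "foo" names are hypothetical):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);	(served by the per-CPU cache)
 *	uma_zfree(foo_zone, p);			(returned to a free bucket)
 *
 * The Keg and its slabs are only touched when the Zone's bucket layers run
 * dry.
 */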
84/*
 85 * This is the representation of a normal (non-OFFPAGE) slab:
86 *
87 * i == item
88 * s == slab pointer
89 *
90 * <---------------- Page (UMA_SLAB_SIZE) ------------------>
91 * ___________________________________________________________
92 * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ |
93 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
94 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
95 * |___________________________________________________________|
96 *
97 *
98 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
99 *
100 * ___________________________________________________________
101 * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ |
102 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
103 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
104 * |___________________________________________________________|
105 * ___________ ^
106 * |slab header| |
107 * |___________|---*
108 *
109 */
110
111#ifndef VM_UMA_INT_H
112#define VM_UMA_INT_H
113
114#define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */
115#define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */
 116#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */
117
118#define UMA_BOOT_PAGES 64 /* Pages allocated for startup */
119
120/* Max waste percentage before going to off page slab management */
121#define UMA_MAX_WASTE 10
122
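/*
 * A worked example, assuming 4 KB pages: with 1024-byte items, an embedded
 * slab header leaves room for only 3 items (4 * 1024 would fill the page
 * exactly, leaving no space for the header), wasting roughly 25% of the
 * page, while an off-page header fits 4 items with none wasted, so the 10%
 * threshold pushes such a keg to OFFPAGE management.
 */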
123/*
 124 * This is the initial size of the hash table for uma_slabs that are managed
 125 * off page; I doubt it will be exceeded in many cases. The hash expands by
 126 * powers of two and currently does not shrink.
127 */
128#define UMA_HASH_SIZE_INIT 32
129
130/*
131 * I should investigate other hashing algorithms. This should yield a low
132 * number of collisions if the pages are relatively contiguous.
133 */
134
135#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)
136
137#define UMA_HASH_INSERT(h, s, mem) \
138 SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \
139 (mem))], (s), us_hlink)
140#define UMA_HASH_REMOVE(h, s, mem) \
141 SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \
142 (mem))], (s), uma_slab, us_hlink)
143
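/*
 * A worked example of the hash, assuming 4 KB pages (UMA_SLAB_SHIFT == 12)
 * and the initial 32-bucket table (uh_hashmask == 31): an item whose page
 * starts at 0xc0357000 lands in bucket
 *
 *	(0xc0357000 >> 12) & 31 == 0xc0357 & 31 == 23
 *
 * and 32 consecutive pages map to 32 distinct buckets, which is why
 * relatively contiguous pages collide rarely, as the comment above notes.
 */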
144/* Hash table for freed address -> slab translation */
145
146SLIST_HEAD(slabhead, uma_slab);
147
148struct uma_hash {
149 struct slabhead *uh_slab_hash; /* Hash table for slabs */
150 int uh_hashsize; /* Current size of the hash table */
151 int uh_hashmask; /* Mask used during hashing */
152};
153
154/*
 155 * Align a field or structure to a cache line.
156 */
157#if defined(__amd64__)
158#define UMA_ALIGN __aligned(CACHE_LINE_SIZE)
159#else
160#define UMA_ALIGN
161#endif
162
163/*
164 * Structures for per cpu queues.
165 */
166
167struct uma_bucket {
168 LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */
169 int16_t ub_cnt; /* Count of free items. */
170 int16_t ub_entries; /* Max items. */
171 void *ub_bucket[]; /* actual allocation storage */
172};
173
174typedef struct uma_bucket * uma_bucket_t;
175
176struct uma_cache {
177 uma_bucket_t uc_freebucket; /* Bucket we're freeing to */
178 uma_bucket_t uc_allocbucket; /* Bucket to allocate from */
179 uint64_t uc_allocs; /* Count of allocations */
180 uint64_t uc_frees; /* Count of frees */
181} UMA_ALIGN;
182
183typedef struct uma_cache * uma_cache_t;
184
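/*
 * A simplified sketch of the per-CPU allocation fast path these two
 * structures enable (modeled on uma_zalloc_arg() in uma_core.c); no zone
 * or keg lock is taken, only a critical section:
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *		critical_exit();
 *		return (item);
 *	}
 *	critical_exit();
 *	(otherwise fall back to the zone's bucket lists, under the lock)
 */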
185/*
186 * Keg management structure
187 *
188 * TODO: Optimize for cache line size
189 *
190 */
191struct uma_keg {
192 struct mtx uk_lock; /* Lock for the keg */
193 struct uma_hash uk_hash;
194
195 LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
196 LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */
197 LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
198 LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */
199
200 uint32_t uk_recurse; /* Allocation recursion count */
201 uint32_t uk_align; /* Alignment mask */
202 uint32_t uk_pages; /* Total page count */
203 uint32_t uk_free; /* Count of items free in slabs */
204 uint32_t uk_size; /* Requested size of each item */
205 uint32_t uk_rsize; /* Real size of each item */
206 uint32_t uk_maxpages; /* Maximum number of pages to alloc */
207
208 uma_init uk_init; /* Keg's init routine */
209 uma_fini uk_fini; /* Keg's fini routine */
210 uma_alloc uk_allocf; /* Allocation function */
211 uma_free uk_freef; /* Free routine */
212
213 u_long uk_offset; /* Next free offset from base KVA */
214 vm_offset_t uk_kva; /* Zone base KVA */
215 uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
216
217 uint16_t uk_slabsize; /* Slab size for this keg */
218 uint16_t uk_pgoff; /* Offset to uma_slab struct */
219 uint16_t uk_ppera; /* pages per allocation from backend */
220 uint16_t uk_ipers; /* Items per slab */
221 uint32_t uk_flags; /* Internal flags */
222
223 /* Least used fields go to the last cache line. */
224 const char *uk_name; /* Name of creating zone. */
225 LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
226};
227typedef struct uma_keg * uma_keg_t;
228
229/*
230 * Free bits per-slab.
231 */
232#define SLAB_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT)
233BITSET_DEFINE(slabbits, SLAB_SETSIZE);
234
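/*
 * A sketch of how an item is carved out of a slab with the sys/bitset.h
 * macros (cf. slab_alloc_item() in uma_core.c); BIT_FFS() returns a 1-based
 * index, or 0 when the set is empty:
 *
 *	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 *	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	slab->us_freecount--;
 */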
235/*
236 * The slab structure manages a single contiguous allocation from backing
237 * store and subdivides it into individually allocatable items.
238 */
239struct uma_slab {
240 uma_keg_t us_keg; /* Keg we live in */
241 union {
242 LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */
243 unsigned long _us_size; /* Size of allocation */
244 } us_type;
245 SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
246 uint8_t *us_data; /* First item */
247 struct slabbits us_free; /* Free bitmask. */
248#ifdef INVARIANTS
249 struct slabbits us_debugfree; /* Debug bitmask. */
250#endif
251 uint16_t us_freecount; /* How many are free? */
252 uint8_t us_flags; /* Page flags see uma.h */
253 uint8_t us_pad; /* Pad to 32bits, unused. */
254};
255
256#define us_link us_type._us_link
257#define us_size us_type._us_size
258
259/*
 260 * The slab structure for UMA_ZONE_REFCNT zones, whose items have
 261 * reference counters maintained within the slab.
262 */
263struct uma_slab_refcnt {
264 struct uma_slab us_head; /* slab header data */
265 uint32_t us_refcnt[0]; /* Actually larger. */
266};
267
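/*
 * A sketch of how an item's counter is located from its address (in the
 * spirit of uma_find_refcnt()): the item's index within the slab selects
 * the matching us_refcnt slot:
 *
 *	i = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 *	refcnt = &((uma_slabrefcnt_t)slab)->us_refcnt[i];
 */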
268typedef struct uma_slab * uma_slab_t;
269typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
270typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
271
272struct uma_klink {
273 LIST_ENTRY(uma_klink) kl_link;
274 uma_keg_t kl_keg;
275};
276typedef struct uma_klink *uma_klink_t;
277
278/*
279 * Zone management structure
280 *
281 * TODO: Optimize for cache line size
282 *
283 */
284struct uma_zone {
285 const char *uz_name; /* Text name of the zone */
286 struct mtx *uz_lock; /* Lock for the zone (keg's lock) */
287
288 LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
289 LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */
290 LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */
291
292 LIST_HEAD(,uma_klink) uz_kegs; /* List of kegs. */
293 struct uma_klink uz_klink; /* klink for first keg. */
294
295 uma_slaballoc uz_slab; /* Allocate a slab from the backend. */
296 uma_ctor uz_ctor; /* Constructor for each allocation */
297 uma_dtor uz_dtor; /* Destructor */
298 uma_init uz_init; /* Initializer for each item */
-299	uma_fini	uz_fini;	/* Discards memory */
+299	uma_fini	uz_fini;	/* Finalizer for each item. */
+300	uma_import	uz_import;	/* Import new memory to cache. */
+301	uma_release	uz_release;	/* Release memory from cache. */
+302	void		*uz_arg;	/* Import/release argument. */
 303
 304	uint32_t	uz_flags;	/* Flags inherited from kegs */
 305	uint32_t	uz_size;	/* Size inherited from kegs */
 306
-304	uint64_t	uz_allocs UMA_ALIGN; /* Total number of allocations */
-305	uint64_t	uz_frees;	/* Total number of frees */
-306	uint64_t	uz_fails;	/* Total number of alloc failures */
+307	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
+308	volatile u_long	uz_fails;	/* Total number of alloc failures */
+309	volatile u_long	uz_frees;	/* Total number of frees */
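/*
 * The import/release pair added in r251826 lets a zone bypass the keg
 * layer: when every bucket is empty the zone asks uz_import to fill an
 * array of items, and uz_release hands surplus items back; the keg-backed
 * path becomes just the default callback pair installed by uma_core.c.
 * Assuming the uma.h typedefs of this revision, a cache-only zone's import
 * callback would be shaped like the hypothetical:
 *
 *	static int
 *	foo_import(void *arg, void **store, int cnt, int flags)
 *	{
 *		... fill store[0..cnt-1], return the number imported ...
 *	}
 */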
310 uint64_t uz_sleeps; /* Total number of alloc sleeps */
311 uint16_t uz_fills; /* Outstanding bucket fills */
312 uint16_t uz_count; /* Highest amount of items in bucket */
313
 314	/* The next two fields are used to print rate-limited warnings. */
315 const char *uz_warning; /* Warning to print on failure */
316 struct timeval uz_ratecheck; /* Warnings rate-limiting */
317
318 /*
319 * This HAS to be the last item because we adjust the zone size
320 * based on NCPU and then allocate the space for the zones.
321 */
322 struct uma_cache uz_cpu[1]; /* Per cpu caches */
323};
324
325/*
326 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
327 */
328#define UMA_ZFLAG_BUCKET 0x02000000 /* Bucket zone. */
329#define UMA_ZFLAG_MULTI 0x04000000 /* Multiple kegs in the zone. */
330#define UMA_ZFLAG_DRAINING 0x08000000 /* Running zone_drain. */
331#define UMA_ZFLAG_PRIVALLOC 0x10000000 /* Use uz_allocf. */
332#define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */
333#define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */
334#define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */
335
336#define UMA_ZFLAG_INHERIT (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
337 UMA_ZFLAG_BUCKET)
338
+339	static inline uma_keg_t
+340	zone_first_keg(uma_zone_t zone)
+341	{
+342	
+343		return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
+344	}
+345	
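/*
 * zone_first_keg() (also new in r251826) resolves the common single-keg
 * case; a typical caller does, e.g.:
 *
 *	keg = zone_first_keg(zone);
 *	KEG_LOCK(keg);
 */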
346#undef UMA_ALIGN
347
348#ifdef _KERNEL
349/* Internal prototypes */
350static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
351void *uma_large_malloc(int size, int wait);
352void uma_large_free(uma_slab_t slab);
353
354/* Lock Macros */
355
356#define KEG_LOCK_INIT(k, lc) \
357 do { \
358 if ((lc)) \
359 mtx_init(&(k)->uk_lock, (k)->uk_name, \
360 (k)->uk_name, MTX_DEF | MTX_DUPOK); \
361 else \
362 mtx_init(&(k)->uk_lock, (k)->uk_name, \
363 "UMA zone", MTX_DEF | MTX_DUPOK); \
364 } while (0)
365
366#define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock)
367#define KEG_LOCK(k) mtx_lock(&(k)->uk_lock)
368#define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock)
369#define ZONE_LOCK(z) mtx_lock((z)->uz_lock)
370#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lock)
371
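/*
 * A typical use of the pair, sketched from the slab lists above: the keg
 * lock covers the uk_*_slab lists and free counts, so pulling a cached
 * slab looks like
 *
 *	KEG_LOCK(keg);
 *	slab = LIST_FIRST(&keg->uk_free_slab);
 *	if (slab != NULL) {
 *		LIST_REMOVE(slab, us_link);
 *		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 *	}
 *	KEG_UNLOCK(keg);
 */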
372/*
373 * Find a slab within a hash table. This is used for OFFPAGE zones to lookup
374 * the slab structure.
375 *
376 * Arguments:
377 * hash The hash table to search.
378 * data The base page of the item.
379 *
380 * Returns:
381 * A pointer to a slab if successful, else NULL.
382 */
383static __inline uma_slab_t
384hash_sfind(struct uma_hash *hash, uint8_t *data)
385{
386 uma_slab_t slab;
387 int hval;
388
389 hval = UMA_HASH(hash, data);
390
391 SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
392 if ((uint8_t *)slab->us_data == data)
393 return (slab);
394 }
395 return (NULL);
396}
397
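/*
 * The three helpers below track slab ownership without any lookup table:
 * UMA borrows the vm_page's object pointer to store the uma_slab_t and
 * sets PG_SLAB to flag the overload, which is what vtoslab() checks before
 * trusting the pointer.
 */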
398static __inline uma_slab_t
399vtoslab(vm_offset_t va)
400{
401 vm_page_t p;
402 uma_slab_t slab;
403
404 p = PHYS_TO_VM_PAGE(pmap_kextract(va));
405 slab = (uma_slab_t )p->object;
406
407 if (p->flags & PG_SLAB)
408 return (slab);
409 else
410 return (NULL);
411}
412
413static __inline void
414vsetslab(vm_offset_t va, uma_slab_t slab)
415{
416 vm_page_t p;
417
418 p = PHYS_TO_VM_PAGE(pmap_kextract(va));
419 p->object = (vm_object_t)slab;
420 p->flags |= PG_SLAB;
421}
422
423static __inline void
424vsetobj(vm_offset_t va, vm_object_t obj)
425{
426 vm_page_t p;
427
428 p = PHYS_TO_VM_PAGE(pmap_kextract(va));
429 p->object = obj;
430 p->flags &= ~PG_SLAB;
431}
432
433/*
 434 * The following two functions may be defined by architecture-specific code
 435 * if they can provide more efficient allocation functions. This is useful
 436 * for using direct-mapped addresses.
437 */
438void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait);
439void uma_small_free(void *mem, int size, uint8_t flags);
440#endif /* _KERNEL */
441
442#endif /* VM_UMA_INT_H */