--- uma_int.h	(292484)
+++ uma_int.h	(295222)
/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/vm/uma_int.h 292484 2015-12-20 02:05:33Z jtl $
+ * $FreeBSD: head/sys/vm/uma_int.h 295222 2016-02-03 23:30:17Z glebius $
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
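
/*
 * For orientation, a sketch of how a consumer reaches this machinery
 * through the public uma(9) API declared in uma.h ("struct foo" and
 * "foo_zone" are made-up names used purely for illustration):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, item);
 *
 * uma_zcreate() sets up the Zone and its backing Keg; uma_zalloc() and
 * uma_zfree() are normally satisfied from the per-CPU Bucket caches
 * described above before falling back to the Keg's slabs.
 */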

/*
 * This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 *
 *	___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES	64		/* Pages allocated for startup */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
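
/*
 * Worked example (illustrative numbers, assuming a 4 KB page): for
 * 1000-byte items, keeping the slab header inside the page leaves room
 * for only 3 items (~1096 of 4096 bytes unused, ~27% > UMA_MAX_WASTE),
 * while an off-page header permits 4 items (~96 bytes, ~2% waste), so
 * such a keg is a candidate for OFFPAGE slab management.
 */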

/*
 * I doubt there will be many cases where this is exceeded.  This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
	    (mem))], (s), uma_slab, us_hlink)
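
/*
 * Example (assuming 4 KB pages, so UMA_SLAB_SHIFT == 12, and a table
 * still at UMA_HASH_SIZE_INIT, so uh_hashmask == 31): a slab whose data
 * page sits at 0xfffff8000123a000 hashes to
 * (0xfffff8000123a000 >> 12) & 31 == 26, so UMA_HASH_INSERT() links it
 * into bucket 26 of uh_slab_hash, and hash_sfind() below repeats the
 * same computation on lookup.
 */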

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * align field or structure to cache line
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
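
/*
 * A condensed sketch of the allocation fast path this cache enables
 * (cf. the real fast path in uma_core.c; refill, statistics for the
 * miss case, and error handling are elided here):
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *	}
 *	critical_exit();
 *
 * The critical section pins the thread to its CPU, so no lock is needed
 * to touch the per-CPU buckets.
 */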

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx_padalign	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint16_t	uk_slabsize;	/* Slab size for this keg */
	uint16_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);
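
/*
 * Per-item free state lives in this bitset.  A sketch of how the slab
 * layer consumes it, modelled on the slab allocation path in uma_core.c
 * and the bitset(9) macros (keg accounting elided):
 *
 *	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 *	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *
 * BIT_FFS() yields the 1-based index of the first set (i.e. free) bit;
 * freeing an item simply does BIT_SET() on its index again.
 */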

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_pad;			/* Pad to 32bits, unused. */
};

#define	us_link	us_type._us_link
#define	us_size	us_type._us_size

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items have
 * reference counters maintained in the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab		us_head;	/* slab header data */
	uint32_t		us_refcnt[0];	/* Actually larger. */
};

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	struct mtx_padalign	uz_lock;	/* Lock for the zone */
	struct mtx_padalign	*uz_lockptr;
	const char	*uz_name;	/* Text name of the zone */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */

	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */

	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_count;	/* Amount of items in full bucket */
	uint16_t	uz_count_min;	/* Minimal amount of items there */

	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
-	uma_maxaction_t	uz_maxaction;	/* Function to run when at limit */
+	struct task	uz_maxaction;	/* Task to run when at limit */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
	(UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return (klink != NULL) ? klink->kl_keg : NULL;
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,		\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(k)->uk_lock, (k)->uk_name,		\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define	ZONE_LOCK_INIT(z, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
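
/*
 * Note that the ZONE_LOCK macros go through uz_lockptr rather than
 * uz_lock directly: for keg-backed zones uz_lockptr normally points at
 * the backing keg's uk_lock, so every zone sharing that keg serializes
 * on one lock (the shared Keg lock described at the top of this file),
 * while cache-only zones point it back at their own uz_lock.
 */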

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

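/*
 * For slabs with embedded headers there is no hash; the slab pointer is
 * cached in the vm_page structures backing the allocation (stashed in
 * the page's plinks.s.pv field by vsetslab()), so translating a KVA to
 * its slab is just a pmap_kextract() plus a pointer fetch.
 */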
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
    int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */