uma_int.h: r94631 → r95758
/*
 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/vm/uma_int.h 94631 2002-04-14 01:56:25Z jeff $
+ * $FreeBSD: head/sys/vm/uma_int.h 95758 2002-04-29 23:45:41Z jeff $
 *
 */

/*
 *
 * Jeff Roberson <jroberson@chesapeake.net>
 *
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 *
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Zones contain lists of slabs which are stored in either the full bin,
 * empty bin, or partially allocated bin, to reduce fragmentation. They also
 * contain the user-supplied value for size, which is adjusted for alignment
 * purposes; rsize is the result of that. The zone also stores information
 * for managing a hash of page addresses that maps pages to uma_slab_t
 * structures for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone. The free list within a
 * slab is managed with a linked list of indexes, which are 8-bit values. If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
 * values. Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items. For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * Another potential space optimization is storing the 8-bit linkage indexes
 * in the space wasted between items due to alignment padding. This may yield
 * a much better memory footprint for certain sizes of objects. Another
 * alternative is to increase UMA_SLAB_SIZE, or allow for dynamic slab sizes.
 * I prefer dynamic slab sizes because we could stick with 8-bit indexes and
 * only use large slab sizes for zones with a lot of waste per slab. This may
 * create inefficiencies in the vm subsystem due to fragmentation in the
 * address space.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size. You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator. I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it. This can, of
 * course, be solved with dynamic slab sizes.
 *
 */

/*
 * This is the representation for a normal (non-OFFPAGE) slab:
 *
 * i == item
 * s == slab pointer
 *
 * <---------------- Page (UMA_SLAB_SIZE) ------------------>
 *  ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
 *
 *  ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 * |___________________________________________________________|
 *  ___________     ^
 * |slab header|    |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#include <sys/mutex.h>

#define UMA_SLAB_SIZE   PAGE_SIZE       /* How big are our slabs? */
#define UMA_SLAB_MASK   (PAGE_SIZE - 1) /* Mask to get back to the page */
#define UMA_SLAB_SHIFT  PAGE_SHIFT      /* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES    15    /* Number of pages allocated for startup */
#define UMA_WORKING_TIME  20    /* Seconds' worth of items to keep */


/* Max waste before going to off-page slab management */
#define UMA_MAX_WASTE   (UMA_SLAB_SIZE / 10)

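/*
 * Editor's sketch (not UMA's actual code) of the waste rule above,
 * assuming a 4 KB UMA_SLAB_SIZE and an assumed 64-byte slab header:
 * each on-page item costs rsize bytes plus one byte of free-list index.
 * For a 2040-byte item, (4096 - 64) / (2040 + 1) fits only 1 item and
 * wastes ~1991 bytes (well over UMA_MAX_WASTE == 409), while an OFFPAGE
 * slab fits 2 (2 * 2040 <= 4096); this is exactly the case where a
 * separately allocated uma_slab_t pays off.
 */
static __inline int
slab_ipers_sketch(int rsize, int hdrsize)
{

        return ((UMA_SLAB_SIZE - hdrsize) / (rsize + 1));
}
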
/*
 * I doubt there will be many cases where this is exceeded. This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two. Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT      32


/*
 * I should investigate other hashing algorithms. This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &        \
        (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)                                      \
        SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),              \
            (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)                                      \
        SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),                   \
            (mem))], (s), uma_slab, us_hlink)

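/*
 * Editor's worked example of UMA_HASH (assuming 4 KB pages, so
 * UMA_SLAB_SHIFT == 12, and the initial 32-bucket table, so
 * uh_hashmask == 31): a slab whose page starts at 0xc0012000 hashes to
 * (0xc0012000 >> 12) & 31 == 18, and the next page, 0xc0013000, to
 * bucket 19.  Contiguous pages land in consecutive buckets, which is
 * why collisions stay low.
 */
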
/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab {
        uma_zone_t      us_zone;        /* Zone we live in */
        union {
                LIST_ENTRY(uma_slab)    us_link;  /* slabs in zone */
                unsigned long           us_size;  /* Size of allocation */
        } us_type;
        SLIST_ENTRY(uma_slab)   us_hlink;       /* Link for hash table */
        u_int8_t        *us_data;       /* First item */
        u_int8_t        us_flags;       /* Page flags, see uma.h */
        u_int8_t        us_freecount;   /* How many are free? */
        u_int8_t        us_firstfree;   /* First free item index */
        u_int8_t        us_freelist[1]; /* Free list (actually larger) */
};

#define us_link us_type.us_link
#define us_size us_type.us_size

typedef struct uma_slab * uma_slab_t;

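/*
 * Editor's sketch of how the index free list above is meant to be
 * walked (not UMA's actual allocator): us_firstfree heads a chain of
 * 8-bit indexes threaded through us_freelist[], and 'rsize' stands in
 * for the zone's uz_rsize.
 */
static __inline void *
slab_pop_sketch(uma_slab_t slab, unsigned long rsize)
{
        u_int8_t idx;

        idx = slab->us_firstfree;                       /* head of chain */
        slab->us_firstfree = slab->us_freelist[idx];    /* unlink it */
        slab->us_freecount--;
        return (slab->us_data + idx * rsize);           /* idx'th item */
}
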
/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
        struct slabhead *uh_slab_hash;  /* Hash table for slabs */
        int             uh_hashsize;    /* Current size of the hash table */
        int             uh_hashmask;    /* Mask used during hashing */
};

extern struct uma_hash *mallochash;

/*
 * Structures for per-cpu queues.
 */

/*
 * This size was chosen so that the struct bucket size is roughly
 * 128 * sizeof(void *). This is exactly true for x86, and for alpha
 * it would be 32 bits smaller if it didn't have alignment adjustments.
 */

#define UMA_BUCKET_SIZE 125

struct uma_bucket {
        LIST_ENTRY(uma_bucket)  ub_link;        /* Link into the zone */
        int16_t ub_ptr;                         /* Index of current item */
        void    *ub_bucket[UMA_BUCKET_SIZE];    /* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
        struct mtx      uc_lock;        /* Spin lock on this cpu's bucket */
        uma_bucket_t    uc_freebucket;  /* Bucket we're freeing to */
        uma_bucket_t    uc_allocbucket; /* Bucket to allocate from */
        u_int64_t       uc_allocs;      /* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;

#define LOCKNAME_LEN    16      /* Length of the name for cpu locks */

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
        char            uz_lname[LOCKNAME_LEN]; /* Text name for the cpu lock */
        char            *uz_name;       /* Text name of the zone */
        LIST_ENTRY(uma_zone)    uz_link;        /* List of all zones */
        u_int32_t       uz_align;       /* Alignment mask */
        u_int32_t       uz_pages;       /* Total page count */

/* Used during alloc / free */
        struct mtx      uz_lock;        /* Lock for the zone */
        u_int32_t       uz_free;        /* Count of items free in slabs */
        u_int16_t       uz_ipers;       /* Items per slab */
        u_int16_t       uz_flags;       /* Internal flags */

        LIST_HEAD(,uma_slab)    uz_part_slab;   /* partially allocated slabs */
        LIST_HEAD(,uma_slab)    uz_free_slab;   /* empty slab list */
        LIST_HEAD(,uma_slab)    uz_full_slab;   /* full slabs */
        LIST_HEAD(,uma_bucket)  uz_full_bucket; /* full buckets */
        LIST_HEAD(,uma_bucket)  uz_free_bucket; /* Buckets for frees */
        u_int32_t       uz_size;        /* Requested size of each item */
        u_int32_t       uz_rsize;       /* Real size of each item */

        struct uma_hash uz_hash;
        u_int16_t       uz_pgoff;       /* Offset to uma_slab struct */
        u_int16_t       uz_ppera;       /* pages per allocation from backend */
        u_int16_t       uz_cacheoff;    /* Next cache offset */
        u_int16_t       uz_cachemax;    /* Max cache offset */

        uma_ctor        uz_ctor;        /* Constructor for each allocation */
        uma_dtor        uz_dtor;        /* Destructor */
        u_int64_t       uz_allocs;      /* Total number of allocations */

        uma_init        uz_init;        /* Initializer for each item */
        uma_fini        uz_fini;        /* Discards memory */
        uma_alloc       uz_allocf;      /* Allocation function */
        uma_free        uz_freef;       /* Free routine */
        struct vm_object        *uz_obj;        /* Zone specific object */
        vm_offset_t     uz_kva;         /* Base kva for zones with objs */
        u_int32_t       uz_maxpages;    /* Maximum number of pages to alloc */
        u_int32_t       uz_cachefree;   /* Last count of items free in caches */
        u_int64_t       uz_oallocs;     /* old allocs count */
        u_int64_t       uz_wssize;      /* Working set size */
        int             uz_recurse;     /* Allocation recursion count */
        uint16_t        uz_fills;       /* Outstanding bucket fills */
        uint16_t        uz_count;       /* Highest value ub_ptr can have */
        /*
         * This HAS to be the last item because we adjust the zone size
         * based on NCPU and then allocate the space for the zones.
         */
        struct uma_cache        uz_cpu[1];      /* Per cpu caches */
};

#define UMA_CACHE_INC   16      /* How much will we move data */

#define UMA_ZFLAG_OFFPAGE       0x0001  /* Struct slab/freelist off page */
#define UMA_ZFLAG_PRIVALLOC     0x0002  /* Zone has supplied its own alloc */
#define UMA_ZFLAG_INTERNAL      0x0004  /* Internal zone, no offpage no PCPU */
#define UMA_ZFLAG_MALLOC        0x0008  /* Zone created by malloc */
#define UMA_ZFLAG_NOFREE        0x0010  /* Don't free data from this zone */
#define UMA_ZFLAG_FULL          0x0020  /* This zone reached uz_maxpages */

/* This lives in uflags */
#define UMA_ZONE_INTERNAL       0x1000  /* Internal zone for uflags */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

-#define ZONE_LOCK_INIT(z)                                       \
-       mtx_init(&(z)->uz_lock, (z)->uz_name, "UMA zone",       \
-           MTX_DEF | MTX_DUPOK)
+#define ZONE_LOCK_INIT(z, lc)                                   \
+       do {                                                    \
+               if ((lc))                                       \
+                       mtx_init(&(z)->uz_lock, (z)->uz_name,   \
+                           (z)->uz_name, MTX_DEF | MTX_DUPOK); \
+               else                                            \
+                       mtx_init(&(z)->uz_lock, (z)->uz_name,   \
+                           "UMA zone", MTX_DEF | MTX_DUPOK);   \
+       } while (0)

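/*
 * Editor's note, inferred from the macro body alone: the new 'lc'
 * argument selects the lock type passed to mtx_init(); nonzero reuses
 * the zone's own name, giving that zone a distinct lock class for
 * witness(4), while zero keeps the shared "UMA zone" class.
 * Illustrative call:
 *
 *      ZONE_LOCK_INIT(zone, 0);
 *
 * CPU_LOCK_INIT below gains the same trailing argument.
 */
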
#define ZONE_LOCK_FINI(z)       mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK(z)    mtx_lock(&(z)->uz_lock)
#define ZONE_UNLOCK(z)  mtx_unlock(&(z)->uz_lock)

-#define CPU_LOCK_INIT(z, cpu)                                   \
-       mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, (z)->uz_lname,    \
-           "UMA cpu", MTX_DEF | MTX_DUPOK)
+#define CPU_LOCK_INIT(z, cpu, lc)                               \
+       do {                                                    \
+               if ((lc))                                       \
+                       mtx_init(&(z)->uz_cpu[(cpu)].uc_lock,   \
+                           (z)->uz_lname, (z)->uz_lname,       \
+                           MTX_DEF | MTX_DUPOK);               \
+               else                                            \
+                       mtx_init(&(z)->uz_cpu[(cpu)].uc_lock,   \
+                           (z)->uz_lname, "UMA cpu",           \
+                           MTX_DEF | MTX_DUPOK);               \
+       } while (0)

#define CPU_LOCK_FINI(z, cpu)   \
        mtx_destroy(&(z)->uz_cpu[(cpu)].uc_lock)

#define CPU_LOCK(z, cpu)        \
        mtx_lock(&(z)->uz_cpu[(cpu)].uc_lock)

#define CPU_UNLOCK(z, cpu)      \
        mtx_unlock(&(z)->uz_cpu[(cpu)].uc_lock)

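/*
 * Editor's sketch of the fast path these per-cpu locks protect (not
 * uma_zalloc's actual code): pop the current item from this cpu's
 * alloc bucket.  ub_ptr is -1 when the bucket is empty, at which point
 * the caller would fall back to the zone lists under ZONE_LOCK.
 */
static __inline void *
cache_alloc_sketch(uma_zone_t zone, int cpu)
{
        uma_cache_t cache;
        uma_bucket_t bucket;
        void *item = NULL;

        CPU_LOCK(zone, cpu);
        cache = &zone->uz_cpu[cpu];
        bucket = cache->uc_allocbucket;
        if (bucket != NULL && bucket->ub_ptr > -1) {
                item = bucket->ub_bucket[bucket->ub_ptr--];
                cache->uc_allocs++;
        }
        CPU_UNLOCK(zone, cpu);
        return (item);
}
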
/*
 * Find a slab within a hash table. This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *      hash  The hash table to search.
 *      data  The base page of the item.
 *
 * Returns:
 *      A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
        uma_slab_t slab;
        int hval;

        hval = UMA_HASH(hash, data);

        SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
                if ((u_int8_t *)slab->us_data == data)
                        return (slab);
        }
        return (NULL);
}
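
/*
 * Editor's usage sketch: a caller freeing an item from an OFFPAGE zone
 * would mask the item's address down to its page base with
 * UMA_SLAB_MASK before consulting the hash.
 */
static __inline uma_slab_t
slab_lookup_sketch(struct uma_hash *hash, void *item)
{
        u_int8_t *pgbase;

        pgbase = (u_int8_t *)((unsigned long)item & ~UMA_SLAB_MASK);
        return (hash_sfind(hash, pgbase));
}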


#endif /* VM_UMA_INT_H */