uma_core.c: revision 94159 (deleted lines) vs. revision 94161 (added lines)
1/*
2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/vm/uma_core.c 94159 2002-04-08 02:42:55Z jeff $
26 * $FreeBSD: head/sys/vm/uma_core.c 94161 2002-04-08 04:48:58Z jeff $
27 *
28 */
29
30/*
31 * uma_core.c Implementation of the Universal Memory allocator
32 *
33 * This allocator is intended to replace the multitude of similar object caches
34 * in the standard FreeBSD kernel. The intent is to be flexible as well as
35 * effecient. A primary design goal is to return unused memory to the rest of
36 * the system. This will make the system as a whole more flexible due to the
37 * ability to move memory to subsystems which most need it instead of leaving
38 * pools of reserved memory unused.
39 *
40 * The basic ideas stem from similar slab/zone based allocators whose algorithms
41 * are well known.
42 *
43 */
44
45/*
46 * TODO:
47 * - Improve memory usage for large allocations
48 * - Improve INVARIANTS (0xdeadc0de write out)
49 * - Investigate cache size adjustments
50 */
51
52/* I should really use ktr.. */
53/*
54#define UMA_DEBUG 1
55#define UMA_DEBUG_ALLOC 1
56#define UMA_DEBUG_ALLOC_1 1
57*/
58
59
60#include "opt_param.h"
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/kernel.h>
64#include <sys/types.h>
65#include <sys/queue.h>
66#include <sys/malloc.h>
67#include <sys/lock.h>
68#include <sys/sysctl.h>
69#include <machine/types.h>
70#include <sys/mutex.h>
71#include <sys/smp.h>
72
73#include <vm/vm.h>
74#include <vm/vm_object.h>
75#include <vm/vm_page.h>
76#include <vm/vm_param.h>
77#include <vm/vm_map.h>
78#include <vm/vm_kern.h>
79#include <vm/vm_extern.h>
80#include <vm/uma.h>
81#include <vm/uma_int.h>
82
83/*
84 * This is the zone from which all zones are spawned. The idea is that even
85 * the zone heads are allocated from the allocator, so we use the bss section
86 * to bootstrap us.
87 */
88static struct uma_zone master_zone;
89static uma_zone_t zones = &master_zone;
90
91/* This is the zone from which all of uma_slab_t's are allocated. */
92static uma_zone_t slabzone;
93
94/*
95 * The initial hash tables come out of this zone so they can be allocated
96 * prior to malloc coming up.
97 */
98static uma_zone_t hashzone;
99
100/*
101 * Zone that buckets come from.
102 */
103static uma_zone_t bucketzone;
104
105/* Linked list of all zones in the system */
106static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);
107
108/* This mutex protects the zone list */
109static struct mtx uma_mtx;
110
111/* Linked list of boot time pages */
112static LIST_HEAD(,uma_slab) uma_boot_pages =
113 LIST_HEAD_INITIALIZER(&uma_boot_pages);
114
115/* Count of free boottime pages */
116static int uma_boot_free = 0;
117
118/* Is the VM done starting up? */
119static int booted = 0;
120
121/* This is the handle used to schedule our working set calculator */
122static struct callout uma_callout;
123
124/* This is mp_maxid + 1, for use while looping over each cpu */
125static int maxcpu;
126
127/*
128 * This structure is passed as the zone ctor arg so that I don't have to create
129 * a special allocation function just for zones.
130 */
131struct uma_zctor_args {
132 char *name;
133 int size;
134 uma_ctor ctor;
135 uma_dtor dtor;
136 uma_init uminit;
137 uma_fini fini;
138 int align;
139 u_int16_t flags;
140};
141
142/*
143 * This is the malloc hash table which is used to find the zone that a
144 * malloc allocation came from. It is not currently resizeable. The
145 * memory for the actual hash bucket is allocated in kmeminit.
146 */
147struct uma_hash mhash;
148struct uma_hash *mallochash = &mhash;
149
150/* Prototypes.. */
151
152static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
153static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
154static void page_free(void *, int, u_int8_t);
155static uma_slab_t slab_zalloc(uma_zone_t, int);
156static void cache_drain(uma_zone_t);
157static void bucket_drain(uma_zone_t, uma_bucket_t);
158static void zone_drain(uma_zone_t);
159static void zone_ctor(void *, int, void *);
160static void zone_dtor(void *, int, void *);
160static void zero_init(void *, int);
161static void zone_small_init(uma_zone_t zone);
162static void zone_large_init(uma_zone_t zone);
163static void zone_foreach(void (*zfunc)(uma_zone_t));
164static void zone_timeout(uma_zone_t zone);
165static void hash_expand(struct uma_hash *);
167static void hash_free(struct uma_hash *hash);
166static void uma_timeout(void *);
167static void uma_startup3(void);
168static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t);
169static void uma_zfree_internal(uma_zone_t,
170 void *, void *, int);
171void uma_print_zone(uma_zone_t);
172void uma_print_stats(void);
173static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
174
175SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
176 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
177SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
178
179
180/*
181 * Routine called by timeout which is used to fire off some time interval
182 * based calculations. (working set, stats, etc.)
183 *
184 * Arguments:
185 * arg Unused
186 *
187 * Returns:
188 * Nothing
189 */
190static void
191uma_timeout(void *unused)
192{
193 zone_foreach(zone_timeout);
194
195 /* Reschedule this event */
196 callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
197}
198
199/*
200 * Routine to perform timeout driven calculations. This does the working set
201 * as well as hash expanding, and per cpu statistics aggregation.
202 *
203 * Arguments:
204 * zone The zone to operate on
205 *
206 * Returns:
207 * Nothing
208 */
209static void
210zone_timeout(uma_zone_t zone)
211{
212 uma_cache_t cache;
213 u_int64_t alloc;
214 int free;
215 int cpu;
216
217 alloc = 0;
218 free = 0;
219
220 /*
221 * Aggregate per cpu cache statistics back to the zone.
222 *
223 * I may rewrite this to set a flag in the per cpu cache instead of
224 * locking. If the flag is not cleared on the next round I will have
225 * to lock and do it here instead so that the statistics don't get too
226 * far out of sync.
227 */
228 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
229 for (cpu = 0; cpu < maxcpu; cpu++) {
230 if (CPU_ABSENT(cpu))
231 continue;
232 CPU_LOCK(zone, cpu);
233 cache = &zone->uz_cpu[cpu];
234 /* Add them up, and reset */
235 alloc += cache->uc_allocs;
236 cache->uc_allocs = 0;
237 if (cache->uc_allocbucket)
238 free += cache->uc_allocbucket->ub_ptr + 1;
239 if (cache->uc_freebucket)
240 free += cache->uc_freebucket->ub_ptr + 1;
241 CPU_UNLOCK(zone, cpu);
242 }
243 }
244
245 /* Now push these stats back into the zone.. */
246 ZONE_LOCK(zone);
247 zone->uz_allocs += alloc;
248
249 /*
250 * cachefree is an instantanious snapshot of what is in the per cpu
251 * caches, not an accurate counter
252 */
253 zone->uz_cachefree = free;
254
255 /*
256 * Expand the zone hash table.
257 *
258 * This is done if the number of slabs is larger than the hash size.
259 * What I'm trying to do here is completely reduce collisions. This
260 * may be a little aggressive. Should I allow for two collisions max?
261 */
262
263 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) &&
264 !(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
265 if (zone->uz_pages / zone->uz_ppera
266 >= zone->uz_hash.uh_hashsize)
267 hash_expand(&zone->uz_hash);
268 }
269
270 /*
271 * Here we compute the working set size as the total number of items
272 * left outstanding since the last time interval. This is slightly
273 * suboptimal. What we really want is the highest number of outstanding
274 * items during the last time quantum. This should be close enough.
275 *
276 * The working set size is used to throttle the zone_drain function.
277 * We don't want to return memory that we may need again immediately.
278 */
279 alloc = zone->uz_allocs - zone->uz_oallocs;
280 zone->uz_oallocs = zone->uz_allocs;
281 zone->uz_wssize = alloc;
282
283 ZONE_UNLOCK(zone);
284}
285
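The working-set calculation at the tail of zone_timeout() above is what later throttles zone_drain(). As a rough illustration only (an editor's standalone sketch in plain C; the toy_* names are invented here and nothing is taken from UMA itself), the same policy looks like this:

/* Editor's sketch: standalone model of the working-set throttle. */
#include <stdio.h>

struct toy_zone {
	unsigned long long allocs;	/* lifetime allocation count */
	unsigned long long oallocs;	/* allocs as of the last interval */
	unsigned long long wssize;	/* working set: allocs in the last interval */
	unsigned long long free;	/* items sitting in free lists */
};

/* Mirrors the tail of zone_timeout(): the working set is simply the
 * number of allocations seen since the previous tick. */
static void
toy_timeout(struct toy_zone *z)
{
	z->wssize = z->allocs - z->oallocs;
	z->oallocs = z->allocs;
}

/* Mirrors the zone_drain() policy: keep at least wssize items cached. */
static unsigned long long
toy_drain(struct toy_zone *z)
{
	if (z->free < z->wssize)
		return (0);			/* nothing to give back */
	return (z->free - z->wssize);		/* surplus we may release */
}

int
main(void)
{
	struct toy_zone z = { .allocs = 1000, .oallocs = 600, .free = 500 };

	toy_timeout(&z);			/* wssize becomes 400 */
	printf("may release %llu items\n", toy_drain(&z));	/* prints 100 */
	return (0);
}

With those numbers, an interval of 400 allocations leaves only 100 of the 500 cached items eligible to be returned to the system.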
286/*
287 * Expands the hash table for OFFPAGE zones. This is done from zone_timeout
288 * to reduce collisions. This must not be done in the regular allocation path,
289 * otherwise, we can recurse on the vm while allocating pages.
290 *
291 * Arguments:
292 * hash The hash you want to expand by a factor of two.
293 *
294 * Returns:
295 * Nothing
296 *
297 * Discussion:
298 */
299static void
300hash_expand(struct uma_hash *hash)
301{
302 struct slabhead *newhash;
303 struct slabhead *oldhash;
304 uma_slab_t slab;
305 int hzonefree;
306 int hashsize;
307 int oldsize;
308 int newsize;
307 int alloc;
308 int hval;
309 int i;
310
311
312 /*
313 * Remember the old hash size and see if it has to go back to the
314 * hash zone, or malloc. The hash zone is used for the initial hash
315 */
316
317 hashsize = hash->uh_hashsize;
319 oldsize = hash->uh_hashsize;
318 oldhash = hash->uh_slab_hash;
319
320 if (hashsize == UMA_HASH_SIZE_INIT)
321 hzonefree = 1;
322 else
323 hzonefree = 0;
324
325
326 /* We're just going to go to a power of two greater */
327 if (hash->uh_hashsize) {
328 alloc = sizeof(hash->uh_slab_hash[0]) * (hash->uh_hashsize * 2);
324 newsize = oldsize * 2;
325 alloc = sizeof(hash->uh_slab_hash[0]) * newsize;
329 /* XXX Shouldn't be abusing DEVBUF here */
330 newhash = (struct slabhead *)malloc(alloc, M_DEVBUF, M_NOWAIT);
331 if (newhash == NULL) {
332 return;
333 }
334 hash->uh_hashsize *= 2;
335 } else {
336 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
337 newhash = uma_zalloc_internal(hashzone, NULL, M_WAITOK, NULL);
338 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
334 newsize = UMA_HASH_SIZE_INIT;
339 }
340
341 bzero(newhash, alloc);
342
343 hash->uh_hashmask = hash->uh_hashsize - 1;
339 hash->uh_hashmask = newsize - 1;
344
345 /*
346 * I need to investigate hash algorithms for resizing without a
347 * full rehash.
348 */
349
350 for (i = 0; i < hashsize; i++)
346 for (i = 0; i < oldsize; i++)
351 while (!SLIST_EMPTY(&hash->uh_slab_hash[i])) {
352 slab = SLIST_FIRST(&hash->uh_slab_hash[i]);
353 SLIST_REMOVE_HEAD(&hash->uh_slab_hash[i], us_hlink);
354 hval = UMA_HASH(hash, slab->us_data);
355 SLIST_INSERT_HEAD(&newhash[hval], slab, us_hlink);
356 }
357
358 if (hash->uh_slab_hash) {
359 if (hzonefree)
360 uma_zfree_internal(hashzone,
361 hash->uh_slab_hash, NULL, 0);
362 else
363 free(hash->uh_slab_hash, M_DEVBUF);
364 }
354 if (oldhash)
355 hash_free(hash);
356
365 hash->uh_slab_hash = newhash;
358 hash->uh_hashsize = newsize;
366
367 return;
368}
369
363static void
364hash_free(struct uma_hash *hash)
365{
366 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
367 uma_zfree_internal(hashzone,
368 hash->uh_slab_hash, NULL, 0);
369 else
370 free(hash->uh_slab_hash, M_DEVBUF);
371
372 hash->uh_slab_hash = NULL;
373}
374
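hash_expand() above now only grows the table (always to the next power of two) and rehashes every slab into it, while the new hash_free() decides whether the old table goes back to hashzone or to free(). A minimal userland model of that double-and-rehash step, with invented toy_* names and a trivial pointer hash standing in for UMA_HASH(), is sketched below; it is an illustration, not the kernel code:

/* Editor's sketch: userland model of hash_expand()'s double-and-rehash. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct toy_ent {
	struct toy_ent *next;			/* like us_hlink */
	void *key;				/* like us_data */
};

struct toy_hash {
	struct toy_ent **buckets;		/* like uh_slab_hash */
	size_t size;				/* always a power of two */
	size_t mask;				/* size - 1, like uh_hashmask */
};

static void
toy_insert(struct toy_hash *h, struct toy_ent *e, void *key)
{
	size_t hv = ((uintptr_t)key >> 4) & h->mask;

	e->key = key;
	e->next = h->buckets[hv];
	h->buckets[hv] = e;
}

static int
toy_expand(struct toy_hash *h)
{
	struct toy_ent **nb, *e, *n;
	size_t newsize = h->size * 2;
	size_t hv, i;

	nb = calloc(newsize, sizeof(*nb));
	if (nb == NULL)
		return (-1);		/* keep the old table, as the M_NOWAIT path does */

	/* Full rehash: every entry moves to its slot under the new mask. */
	for (i = 0; i < h->size; i++)
		for (e = h->buckets[i]; e != NULL; e = n) {
			n = e->next;
			hv = ((uintptr_t)e->key >> 4) & (newsize - 1);
			e->next = nb[hv];
			nb[hv] = e;
		}

	free(h->buckets);		/* stands in for hash_free() on the old table */
	h->buckets = nb;
	h->size = newsize;
	h->mask = newsize - 1;
	return (0);
}

int
main(void)
{
	static struct toy_ent ents[64];
	struct toy_hash h = { NULL, 8, 7 };
	int i;

	h.buckets = calloc(h.size, sizeof(*h.buckets));
	if (h.buckets == NULL)
		return (1);
	for (i = 0; i < 64; i++)
		toy_insert(&h, &ents[i], (void *)(uintptr_t)(i * 32));
	if (toy_expand(&h) == 0)
		printf("expanded to %zu buckets\n", h.size);
	return (0);
}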
370/*
371 * Frees all outstanding items in a bucket
372 *
373 * Arguments:
374 * zone The zone to free to, must be unlocked.
375 * bucket The free/alloc bucket with items, cpu queue must be locked.
376 *
377 * Returns:
378 * Nothing
379 */
380
381static void
382bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
383{
384 uma_slab_t slab;
385 int mzone;
386 void *item;
387
388 if (bucket == NULL)
389 return;
390
391 slab = NULL;
392 mzone = 0;
393
394 /* We have to lookup the slab again for malloc.. */
395 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
396 mzone = 1;
397
398 while (bucket->ub_ptr > -1) {
399 item = bucket->ub_bucket[bucket->ub_ptr];
400#ifdef INVARIANTS
401 bucket->ub_bucket[bucket->ub_ptr] = NULL;
402 KASSERT(item != NULL,
403 ("bucket_drain: botched ptr, item is NULL"));
404#endif
405 bucket->ub_ptr--;
406 /*
407 * This is extremely inefficient. The slab pointer was passed
408 * to uma_zfree_arg, but we lost it because the buckets don't
409 * hold them. This will go away when free() gets a size passed
410 * to it.
411 */
412 if (mzone)
413 slab = hash_sfind(mallochash,
414 (u_int8_t *)((unsigned long)item &
415 (~UMA_SLAB_MASK)));
416 uma_zfree_internal(zone, item, slab, 1);
417 }
418}
419
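For malloc zones, bucket_drain() above recovers the slab by rounding the item address down to its slab base before the mallochash lookup. The standalone sketch below shows just that masking step; the 4 KB slab size and the TOY_* names are this editor's assumptions, not the real UMA_SLAB_MASK definition:

/* Editor's sketch: deriving a slab base address from an item pointer. */
#include <stdio.h>
#include <stdint.h>

#define	TOY_SLAB_SIZE	4096UL			/* assumed page-sized slabs */
#define	TOY_SLAB_MASK	(TOY_SLAB_SIZE - 1)	/* stand-in for UMA_SLAB_MASK */

int
main(void)
{
	uintptr_t item = 0x80042358UL;			/* some item inside a slab */
	uintptr_t base = item & ~(uintptr_t)TOY_SLAB_MASK;	/* slab the item came from */

	/* base (0x80042000 here) is what bucket_drain() hands to hash_sfind(). */
	printf("item %#lx lives in the slab at %#lx\n",
	    (unsigned long)item, (unsigned long)base);
	return (0);
}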
420/*
421 * Drains the per cpu caches for a zone.
422 *
423 * Arguments:
424 * zone The zone to drain, must be unlocked.
425 *
426 * Returns:
427 * Nothing
428 *
429 * This function returns with the zone locked so that the per cpu queues can
430 * not be filled until zone_drain is finished.
431 *
432 */
433static void
434cache_drain(uma_zone_t zone)
435{
436 uma_bucket_t bucket;
437 uma_cache_t cache;
438 int cpu;
439
440 /*
441 * Flush out the per cpu queues.
442 *
443 * XXX This causes unnecessary thrashing due to immediately having
444 * empty per cpu queues. I need to improve this.
445 */
446
447 /*
448 * We have to lock each cpu cache before locking the zone
449 */
450 ZONE_UNLOCK(zone);
451
452 for (cpu = 0; cpu < maxcpu; cpu++) {
453 if (CPU_ABSENT(cpu))
454 continue;
455 CPU_LOCK(zone, cpu);
456 cache = &zone->uz_cpu[cpu];
457 bucket_drain(zone, cache->uc_allocbucket);
458 bucket_drain(zone, cache->uc_freebucket);
459 }
460
461 /*
462 * Drain the bucket queues and free the buckets, we just keep two per
463 * cpu (alloc/free).
464 */
465 ZONE_LOCK(zone);
466 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
467 LIST_REMOVE(bucket, ub_link);
468 ZONE_UNLOCK(zone);
469 bucket_drain(zone, bucket);
470 uma_zfree_internal(bucketzone, bucket, NULL, 0);
471 ZONE_LOCK(zone);
472 }
473
474 /* Now we do the free queue.. */
475 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
476 LIST_REMOVE(bucket, ub_link);
477 uma_zfree_internal(bucketzone, bucket, NULL, 0);
478 }
479
480 /* We unlock here, but they will all block until the zone is unlocked */
481 for (cpu = 0; cpu < maxcpu; cpu++) {
482 if (CPU_ABSENT(cpu))
483 continue;
484 CPU_UNLOCK(zone, cpu);
485 }
486
487 zone->uz_cachefree = 0;
488}
489
490/*
491 * Frees pages from a zone back to the system. This is done on demand from
492 * the pageout daemon.
493 *
494 * Arguments:
495 * zone The zone to free pages from
501 * all Should we drain all items?
496 *
497 * Returns:
498 * Nothing.
499 */
500static void
501zone_drain(uma_zone_t zone)
502{
503 uma_slab_t slab;
504 uma_slab_t n;
505 u_int64_t extra;
506 u_int8_t flags;
507 u_int8_t *mem;
508 int i;
509
510 /*
511 * We don't want to take pages from staticly allocated zones at this
512 * time
513 */
514 if (zone->uz_flags & UMA_ZFLAG_NOFREE || zone->uz_freef == NULL)
515 return;
516
517 ZONE_LOCK(zone);
518
519 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
520 cache_drain(zone);
521
522 if (zone->uz_free < zone->uz_wssize)
523 goto finished;
524#ifdef UMA_DEBUG
525 printf("%s working set size: %llu free items: %u\n",
526 zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
527#endif
528 extra = zone->uz_wssize - zone->uz_free;
534 extra = zone->uz_free - zone->uz_wssize;
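	/*
	 * Editor's note, not part of the source: the deleted line above had
	 * the operands reversed.  This point is only reached once uz_free >=
	 * uz_wssize (see the goto a few lines up), so the surplus beyond the
	 * working set is uz_free - uz_wssize.  For example uz_free = 96,
	 * uz_wssize = 32 and uz_ipers = 16 leave extra = 64 items, or 4
	 * whole slabs after the division below.
	 */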
529 extra /= zone->uz_ipers;
530
531 /* extra is now the number of extra slabs that we can free */
532
533 if (extra == 0)
534 goto finished;
535
536 slab = LIST_FIRST(&zone->uz_free_slab);
537 while (slab && extra) {
538 n = LIST_NEXT(slab, us_link);
539
540 /* We have no where to free these to */
541 if (slab->us_flags & UMA_SLAB_BOOT) {
542 slab = n;
543 continue;
544 }
545
546 LIST_REMOVE(slab, us_link);
547 zone->uz_pages -= zone->uz_ppera;
548 zone->uz_free -= zone->uz_ipers;
549 if (zone->uz_fini)
550 for (i = 0; i < zone->uz_ipers; i++)
551 zone->uz_fini(
552 slab->us_data + (zone->uz_rsize * i),
553 zone->uz_size);
554 flags = slab->us_flags;
555 mem = slab->us_data;
556 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
557 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
558 UMA_HASH_REMOVE(mallochash,
559 slab, slab->us_data);
560 } else {
561 UMA_HASH_REMOVE(&zone->uz_hash,
562 slab, slab->us_data);
563 }
564 uma_zfree_internal(slabzone, slab, NULL, 0);
565 } else if (zone->uz_flags & UMA_ZFLAG_MALLOC)
566 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
567#ifdef UMA_DEBUG
568 printf("%s: Returning %d bytes.\n",
569 zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
570#endif
571 zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
572
573 slab = n;
574 extra--;
575 }
576
577finished:
578 ZONE_UNLOCK(zone);
579}
580
581/*
582 * Allocate a new slab for a zone. This does not insert the slab onto a list.
583 *
584 * Arguments:
585 * zone The zone to allocate slabs for
586 * wait Shall we wait?
587 *
588 * Returns:
589 * The slab that was allocated or NULL if there is no memory and the
590 * caller specified M_NOWAIT.
591 *
592 */
593static uma_slab_t
594slab_zalloc(uma_zone_t zone, int wait)
595{
596 uma_slab_t slab; /* Starting slab */
597 u_int8_t *mem;
598 u_int8_t flags;
599 int i;
600
601 slab = NULL;
602
603#ifdef UMA_DEBUG
604 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
605#endif
606 if (zone->uz_maxpages &&
607 zone->uz_pages + zone->uz_ppera > zone->uz_maxpages)
608 return (NULL);
609
610 ZONE_UNLOCK(zone);
611
612 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
613 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
614 if (slab == NULL) {
615 ZONE_LOCK(zone);
616 return NULL;
617 }
618 }
619
620 if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
621 mtx_lock(&Giant);
622 mem = zone->uz_allocf(zone,
623 zone->uz_ppera * UMA_SLAB_SIZE, &flags, wait);
624 mtx_unlock(&Giant);
625 if (mem == NULL) {
626 ZONE_LOCK(zone);
627 return (NULL);
628 }
629 } else {
630 uma_slab_t tmps;
631
632 if (zone->uz_ppera > 1)
633 panic("UMA: Attemping to allocate multiple pages before vm has started.\n");
634 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
635 panic("Mallocing before uma_startup2 has been called.\n");
636 if (uma_boot_free == 0)
637 panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
638 tmps = LIST_FIRST(&uma_boot_pages);
639 LIST_REMOVE(tmps, us_link);
640 uma_boot_free--;
641 mem = tmps->us_data;
642 }
643
644 ZONE_LOCK(zone);
645
646 /* Alloc slab structure for offpage, otherwise adjust it's position */
647 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
648 slab = (uma_slab_t )(mem + zone->uz_pgoff);
649 } else {
650 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC))
651 UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
652 }
653 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
654#ifdef UMA_DEBUG
655 printf("Inserting %p into malloc hash from slab %p\n",
656 mem, slab);
657#endif
658 /* XXX Yikes! No lock on the malloc hash! */
659 UMA_HASH_INSERT(mallochash, slab, mem);
660 }
661
662 slab->us_zone = zone;
663 slab->us_data = mem;
664
665 /*
666 * This is intended to spread data out across cache lines.
667 *
668 * This code doesn't seem to work properly on x86, and on alpha
669 * it makes absolutely no performance difference. I'm sure it could
670 * use some tuning, but sun makes outrageous claims about it's
671 * performance.
672 */
673#if 0
674 if (zone->uz_cachemax) {
675 slab->us_data += zone->uz_cacheoff;
676 zone->uz_cacheoff += UMA_CACHE_INC;
677 if (zone->uz_cacheoff > zone->uz_cachemax)
678 zone->uz_cacheoff = 0;
679 }
680#endif
681
682 slab->us_freecount = zone->uz_ipers;
683 slab->us_firstfree = 0;
684 slab->us_flags = flags;
685 for (i = 0; i < zone->uz_ipers; i++)
686 slab->us_freelist[i] = i+1;
687
688 if (zone->uz_init)
689 for (i = 0; i < zone->uz_ipers; i++)
690 zone->uz_init(slab->us_data + (zone->uz_rsize * i),
691 zone->uz_size);
692
693 zone->uz_pages += zone->uz_ppera;
694 zone->uz_free += zone->uz_ipers;
695
696 return (slab);
697}
698
699/*
700 * Allocates a number of pages from the system
701 *
702 * Arguments:
703 * zone Unused
704 * bytes The number of bytes requested
705 * wait Shall we wait?
706 *
707 * Returns:
708 * A pointer to the alloced memory or possibly
709 * NULL if M_NOWAIT is set.
710 */
711static void *
712page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
713{
714 void *p; /* Returned page */
715
716 /*
717 * XXX The original zone allocator did this, but I don't think it's
718 * necessary in current.
719 */
720
721 if (lockstatus(&kernel_map->lock, NULL)) {
722 *pflag = UMA_SLAB_KMEM;
723 p = (void *) kmem_malloc(kmem_map, bytes, wait);
724 } else {
725 *pflag = UMA_SLAB_KMAP;
726 p = (void *) kmem_alloc(kernel_map, bytes);
727 }
728
729 return (p);
730}
731
732/*
733 * Allocates a number of pages from within an object
734 *
735 * Arguments:
736 * zone Unused
737 * bytes The number of bytes requested
738 * wait Shall we wait?
739 *
740 * Returns:
741 * A pointer to the alloced memory or possibly
742 * NULL if M_NOWAIT is set.
743 */
744static void *
745obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
746{
747 vm_offset_t zkva;
748 vm_offset_t retkva;
749 vm_page_t p;
750 int pages;
751
752 retkva = NULL;
753 pages = zone->uz_pages;
754
755 /*
756 * This looks a little weird since we're getting one page at a time
757 */
758 while (bytes > 0) {
759 p = vm_page_alloc(zone->uz_obj, pages,
760 VM_ALLOC_INTERRUPT);
761 if (p == NULL)
762 return (NULL);
763
764 zkva = zone->uz_kva + pages * PAGE_SIZE;
765 if (retkva == NULL)
766 retkva = zkva;
767 pmap_qenter(zkva, &p, 1);
768 bytes -= PAGE_SIZE;
769 pages += 1;
770 }
771
772 *flags = UMA_SLAB_PRIV;
773
774 return ((void *)retkva);
775}
776
777/*
778 * Frees a number of pages to the system
779 *
780 * Arguments:
781 * mem A pointer to the memory to be freed
782 * size The size of the memory being freed
783 * flags The original p->us_flags field
784 *
785 * Returns:
786 * Nothing
787 *
788 */
789static void
790page_free(void *mem, int size, u_int8_t flags)
791{
792 vm_map_t map;
793 if (flags & UMA_SLAB_KMEM)
794 map = kmem_map;
795 else if (flags & UMA_SLAB_KMAP)
796 map = kernel_map;
797 else
798 panic("UMA: page_free used with invalid flags %d\n", flags);
799
800 kmem_free(map, (vm_offset_t)mem, size);
801}
802
803/*
804 * Zero fill initializer
805 *
806 * Arguments/Returns follow uma_init specifications
807 *
808 */
809static void
810zero_init(void *mem, int size)
811{
812 bzero(mem, size);
813}
814
815/*
816 * Finish creating a small uma zone. This calculates ipers, and the zone size.
817 *
818 * Arguments
819 * zone The zone we should initialize
820 *
821 * Returns
822 * Nothing
823 */
824static void
825zone_small_init(uma_zone_t zone)
826{
827 int rsize;
828 int memused;
829 int ipers;
830
831 rsize = zone->uz_size;
832
833 if (rsize < UMA_SMALLEST_UNIT)
834 rsize = UMA_SMALLEST_UNIT;
835
836 if (rsize & zone->uz_align)
837 rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);
838
839 zone->uz_rsize = rsize;
840
841 rsize += 1; /* Account for the byte of linkage */
842 zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
843 zone->uz_ppera = 1;
844
845 memused = zone->uz_ipers * zone->uz_rsize;
846
847 /* Can we do any better? */
848 if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
849 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
850 return;
851 ipers = UMA_SLAB_SIZE / zone->uz_rsize;
852 if (ipers > zone->uz_ipers) {
853 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
854 zone->uz_ipers = ipers;
855 }
856 }
857
858}
859
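To make the zone_small_init() arithmetic above concrete, here is an editor's standalone sketch; the slab size, header size and UMA_MAX_WASTE stand-in are assumed values for illustration, not the real uma_int.h constants:

/* Editor's sketch: the zone_small_init() sizing arithmetic, modeled. */
#include <stdio.h>

#define	TOY_SLAB_SIZE	4096	/* assumed UMA_SLAB_SIZE */
#define	TOY_SLAB_HDR	32	/* assumed sizeof(struct uma_slab) */
#define	TOY_MAX_WASTE	256	/* assumed stand-in for UMA_MAX_WASTE */

int
main(void)
{
	int size = 512;			/* requested item size */
	int align = 64 - 1;		/* alignment mask, uz_align style */
	int rsize, ipers, waste, ipers_offpage;

	/* Round the item up to its alignment, as zone_small_init() does. */
	rsize = size;
	if (rsize & align)
		rsize = (rsize & ~align) + (align + 1);

	/* In-page header: one byte of free-list linkage per item. */
	ipers = (TOY_SLAB_SIZE - TOY_SLAB_HDR) / (rsize + 1);	/* 7 */
	waste = TOY_SLAB_SIZE - ipers * rsize;			/* 512 */

	/* If too much is wasted, an off-page header packs more items in. */
	ipers_offpage = TOY_SLAB_SIZE / rsize;			/* 8 */
	if (waste >= TOY_MAX_WASTE && ipers_offpage > ipers)
		printf("%d items in-page, %d off-page: go OFFPAGE\n",
		    ipers, ipers_offpage);
	return (0);
}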
860/*
861 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do
862 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
863 * more complicated.
864 *
865 * Arguments
866 * zone The zone we should initialize
867 *
868 * Returns
869 * Nothing
870 */
871static void
872zone_large_init(uma_zone_t zone)
873{
874 int pages;
875
876 pages = zone->uz_size / UMA_SLAB_SIZE;
877
878 /* Account for remainder */
879 if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
880 pages++;
881
882 zone->uz_ppera = pages;
883 zone->uz_ipers = 1;
884
885 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
886 zone->uz_rsize = zone->uz_size;
887}
888
889/*
890 * Zone header ctor. This initializes all fields, locks, etc. And inserts
891 * the zone onto the global zone list.
892 *
893 * Arguments/Returns follow uma_ctor specifications
894 * udata Actually uma_zcreat_args
895 *
896 */
897
898static void
899zone_ctor(void *mem, int size, void *udata)
900{
901 struct uma_zctor_args *arg = udata;
902 uma_zone_t zone = mem;
903 int cplen;
904 int cpu;
905
906 bzero(zone, size);
907 zone->uz_name = arg->name;
908 zone->uz_size = arg->size;
909 zone->uz_ctor = arg->ctor;
910 zone->uz_dtor = arg->dtor;
911 zone->uz_init = arg->uminit;
912 zone->uz_align = arg->align;
913 zone->uz_free = 0;
914 zone->uz_pages = 0;
915 zone->uz_flags = 0;
916 zone->uz_allocf = page_alloc;
917 zone->uz_freef = page_free;
918
919 if (arg->flags & UMA_ZONE_ZINIT)
920 zone->uz_init = zero_init;
921
922 if (arg->flags & UMA_ZONE_INTERNAL)
923 zone->uz_flags |= UMA_ZFLAG_INTERNAL;
924
925 if (arg->flags & UMA_ZONE_MALLOC)
926 zone->uz_flags |= UMA_ZFLAG_MALLOC;
927
928 if (arg->flags & UMA_ZONE_NOFREE)
929 zone->uz_flags |= UMA_ZFLAG_NOFREE;
930
931 if (zone->uz_size > UMA_SLAB_SIZE)
932 zone_large_init(zone);
933 else
934 zone_small_init(zone);
935
936 /* We do this so that the per cpu lock name is unique for each zone */
937 memcpy(zone->uz_lname, "PCPU ", 5);
938 cplen = min(strlen(zone->uz_name) + 1, LOCKNAME_LEN - 6);
939 memcpy(zone->uz_lname+5, zone->uz_name, cplen);
940 zone->uz_lname[LOCKNAME_LEN - 1] = '\0';
941
942 /*
943 * If we're putting the slab header in the actual page we need to
944 * figure out where in each page it goes. This calculates a right
945 * justified offset into the memory on a ALIGN_PTR boundary.
946 */
947 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
948 int totsize;
949 int waste;
950
951 /* Size of the slab struct and free list */
952 totsize = sizeof(struct uma_slab) + zone->uz_ipers;
953 if (totsize & UMA_ALIGN_PTR)
954 totsize = (totsize & ~UMA_ALIGN_PTR) +
955 (UMA_ALIGN_PTR + 1);
956 zone->uz_pgoff = UMA_SLAB_SIZE - totsize;
957
958 waste = zone->uz_pgoff;
959 waste -= (zone->uz_ipers * zone->uz_rsize);
960
961 /*
962 * This calculates how much space we have for cache line size
963 * optimizations. It works by offseting each slab slightly.
964 * Currently it breaks on x86, and so it is disabled.
965 */
966
967 if (zone->uz_align < UMA_CACHE_INC && waste > UMA_CACHE_INC) {
968 zone->uz_cachemax = waste - UMA_CACHE_INC;
969 zone->uz_cacheoff = 0;
970 }
971
972 totsize = zone->uz_pgoff + sizeof(struct uma_slab)
973 + zone->uz_ipers;
974 /* I don't think it's possible, but I'll make sure anyway */
975 if (totsize > UMA_SLAB_SIZE) {
976 printf("zone %s ipers %d rsize %d size %d\n",
977 zone->uz_name, zone->uz_ipers, zone->uz_rsize,
978 zone->uz_size);
979 panic("UMA slab won't fit.\n");
980 }
981 } else {
982 /* hash_expand here to allocate the initial hash table */
983 hash_expand(&zone->uz_hash);
984 zone->uz_pgoff = 0;
985 }
986
987#ifdef UMA_DEBUG
988 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
989 zone->uz_name, zone,
990 zone->uz_size, zone->uz_ipers,
991 zone->uz_ppera, zone->uz_pgoff);
992#endif
993 ZONE_LOCK_INIT(zone);
994
995 mtx_lock(&uma_mtx);
996 LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
997 mtx_unlock(&uma_mtx);
998
999 /*
1000 * Some internal zones don't have room allocated for the per cpu
1001 * caches. If we're internal, bail out here.
1002 */
1003
1004 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1005 return;
1006
1007 if (zone->uz_ipers < UMA_BUCKET_SIZE)
1008 zone->uz_count = zone->uz_ipers - 1;
1009 else
1010 zone->uz_count = UMA_BUCKET_SIZE - 1;
1011
1012 for (cpu = 0; cpu < maxcpu; cpu++)
1013 CPU_LOCK_INIT(zone, cpu);
1014}
1015
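The right-justified header placement that zone_ctor() above computes for non-OFFPAGE zones can be modeled on its own; the header size and alignment mask below are assumptions chosen for illustration:

/* Editor's sketch: the right-justified slab header offset from zone_ctor(). */
#include <stdio.h>

#define	TOY_SLAB_SIZE	4096		/* assumed UMA_SLAB_SIZE */
#define	TOY_SLAB_HDR	32		/* assumed sizeof(struct uma_slab) */
#define	TOY_ALIGN_PTR	(8 - 1)		/* assumed UMA_ALIGN_PTR mask */

int
main(void)
{
	int ipers = 7;			/* items kept on this slab */
	int totsize, pgoff;

	/*
	 * Header plus one free-list byte per item, rounded up to a pointer
	 * boundary, then pushed to the very end of the slab page.
	 */
	totsize = TOY_SLAB_HDR + ipers;			/* 39 */
	if (totsize & TOY_ALIGN_PTR)
		totsize = (totsize & ~TOY_ALIGN_PTR) + (TOY_ALIGN_PTR + 1);
	pgoff = TOY_SLAB_SIZE - totsize;		/* 4096 - 40 = 4056 */

	printf("slab header lives at page offset %d\n", pgoff);
	return (0);
}

Everything below that offset is item storage; zone_ctor() then re-checks that the right-justified header and free list still fit within one slab before accepting the layout.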
1022/*
1023 * Zone header dtor. This frees all data, destroys locks, frees the hash table
1024 * and removes the zone from the global list.
1025 *
1026 * Arguments/Returns follow uma_dtor specifications
1027 * udata unused
1028 */
1029
1030static void
1031zone_dtor(void *arg, int size, void *udata)
1032{
1033 uma_zone_t zone;
1034 int cpu;
1035
1036 zone = (uma_zone_t)arg;
1037
1038 mtx_lock(&uma_mtx);
1039 LIST_REMOVE(zone, uz_link);
1040 mtx_unlock(&uma_mtx);
1041
1042 ZONE_LOCK(zone);
1043 zone->uz_wssize = 0;
1044 ZONE_UNLOCK(zone);
1045
1046 zone_drain(zone);
1047 ZONE_LOCK(zone);
1048 if (zone->uz_free != 0)
1049 printf("Zone %s was not empty. Lost %d pages of memory.\n",
1050 zone->uz_name, zone->uz_pages);
1051
1052 if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) != 0)
1053 for (cpu = 0; cpu < maxcpu; cpu++)
1054 CPU_LOCK_FINI(zone, cpu);
1055
1056 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0)
1057 hash_free(&zone->uz_hash);
1058
1059 ZONE_UNLOCK(zone);
1060 ZONE_LOCK_FINI(zone);
1061}
1062/*
1063 * Traverses every zone in the system and calls a callback
1064 *
1065 * Arguments:
1066 * zfunc A pointer to a function which accepts a zone
1067 * as an argument.
1068 *
1069 * Returns:
1070 * Nothing
1071 */
1072static void
1073zone_foreach(void (*zfunc)(uma_zone_t))
1074{
1075 uma_zone_t zone;
1076
1077 mtx_lock(&uma_mtx);
1078 LIST_FOREACH(zone, &uma_zones, uz_link) {
1079 zfunc(zone);
1080 }
1081 mtx_unlock(&uma_mtx);
1082}
1083
1084/* Public functions */
1085/* See uma.h */
1086void
1087uma_startup(void *bootmem)
1088{
1089 struct uma_zctor_args args;
1090 uma_slab_t slab;
1091 int slabsize;
1092 int i;
1093
1094#ifdef UMA_DEBUG
1095 printf("Creating uma zone headers zone.\n");
1096#endif
1097#ifdef SMP
1098 maxcpu = mp_maxid + 1;
1099#else
1100 maxcpu = 1;
1101#endif
1102#ifdef UMA_DEBUG
1103 printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
1104 Debugger("stop");
1105#endif
1106 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1107 /* "manually" Create the initial zone */
1108 args.name = "UMA Zones";
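	/*
	 * struct uma_zone already contains storage for one per-cpu cache,
	 * so only the remaining (maxcpu - 1) uma_cache structures need to
	 * be tacked onto the allocation.
	 */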
1109 args.size = sizeof(struct uma_zone) +
1110 (sizeof(struct uma_cache) * (maxcpu - 1));
1111 args.ctor = zone_ctor;
1112 args.dtor = zone_dtor;
1113 args.uminit = zero_init;
1114 args.fini = NULL;
1115 args.align = 32 - 1;
1116 args.flags = UMA_ZONE_INTERNAL;
1117	/* The initial zone has no per-cpu queues, so it's smaller */
1118 zone_ctor(zones, sizeof(struct uma_zone), &args);
1119
1120#ifdef UMA_DEBUG
1121 printf("Filling boot free list.\n");
1122#endif
1123 for (i = 0; i < UMA_BOOT_PAGES; i++) {
1124 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1125 slab->us_data = (u_int8_t *)slab;
1126 slab->us_flags = UMA_SLAB_BOOT;
1127 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1128 uma_boot_free++;
1129 }
1130
1131#ifdef UMA_DEBUG
1132 printf("Creating slab zone.\n");
1133#endif
1134
1135 /*
1136 * This is the max number of free list items we'll have with
1137 * offpage slabs.
1138 */
1139
1140 slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
1141 slabsize /= UMA_MAX_WASTE;
1142	slabsize++;	/* In case the division above rounded down */
1143 slabsize += sizeof(struct uma_slab);
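	/*
	 * Worked example with made-up numbers: a 4096-byte UMA_SLAB_SIZE,
	 * a 32-byte struct uma_slab and UMA_MAX_WASTE of 10 would give
	 * (4096 - 32) / 10 + 1 = 407 free-list bytes, so the "UMA Slabs"
	 * zone below would hold items of roughly 439 bytes -- large enough
	 * for any off-page slab header.
	 */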
1144
1145 /* Now make a zone for slab headers */
1146 slabzone = uma_zcreate("UMA Slabs",
1147 slabsize,
1148 NULL, NULL, NULL, NULL,
1149 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1150
1151 hashzone = uma_zcreate("UMA Hash",
1152 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1153 NULL, NULL, NULL, NULL,
1154 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1155
1156 bucketzone = uma_zcreate("UMA Buckets", sizeof(struct uma_bucket),
1157 NULL, NULL, NULL, NULL,
1158 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1159
1160
1161#ifdef UMA_DEBUG
1162 printf("UMA startup complete.\n");
1163#endif
1164}
1165
1166/* see uma.h */
1167void
1168uma_startup2(void *hashmem, u_long elems)
1169{
1170 bzero(hashmem, elems * sizeof(void *));
1171 mallochash->uh_slab_hash = hashmem;
1172 mallochash->uh_hashsize = elems;
1173 mallochash->uh_hashmask = elems - 1;
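	/*
	 * The mask only works as a cheap modulus when elems is a power of
	 * two: e.g. (hypothetically) elems = 4096 gives uh_hashmask = 0xfff,
	 * so a slab hashes to its chain with a single AND.
	 */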
1174 booted = 1;
1175#ifdef UMA_DEBUG
1176 printf("UMA startup2 complete.\n");
1177#endif
1178}
1179
1180/*
1181 * Initialize our callout handle
1182 *
1183 */
1184
1185static void
1186uma_startup3(void)
1187{
1188#ifdef UMA_DEBUG
1189 printf("Starting callout.\n");
1190#endif
1191 /* We'll be mpsafe once the vm is locked. */
1192 callout_init(&uma_callout, 0);
1193 callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
1194#ifdef UMA_DEBUG
1195 printf("UMA startup3 complete.\n");
1196#endif
1197}
1198
1199/* See uma.h */
1200uma_zone_t
1201uma_zcreate(char *name, int size, uma_ctor ctor, uma_dtor dtor, uma_init uminit,
1202 uma_fini fini, int align, u_int16_t flags)
1203
1204{
1205 struct uma_zctor_args args;
1206
1207 /* This stuff is essential for the zone ctor */
1208 args.name = name;
1209 args.size = size;
1210 args.ctor = ctor;
1211 args.dtor = dtor;
1212 args.uminit = uminit;
1213 args.fini = fini;
1214 args.align = align;
1215 args.flags = flags;
1216
1217 return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL));
1218}
1219
1220/* See uma.h */
1221void
1222uma_zdestroy(uma_zone_t zone)
1223{
1224 uma_zfree_internal(zones, zone, NULL, 0);
1225}
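/*
 * Illustrative sketch only, not part of the allocator: roughly how a client
 * subsystem might drive the public API above.  The "foo" names, sizes and
 * limits are invented for the example.
 */
#if 0
struct foo {
	int	f_refs;
	char	f_name[16];
};

static uma_zone_t foo_zone;

static void
foo_setup(void)
{
	/* No ctor/dtor/init/fini, pointer alignment, no special flags. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Cap the zone at roughly 1000 items. */
	uma_zone_set_max(foo_zone, 1000);
}

static struct foo *
foo_get(void)
{
	/* udata would only be handed to the (absent) constructor. */
	return (uma_zalloc_arg(foo_zone, NULL, M_WAITOK));
}

static void
foo_put(struct foo *fp)
{
	uma_zfree_arg(foo_zone, fp, NULL);
}

static void
foo_teardown(void)
{
	uma_zdestroy(foo_zone);
}
#endif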
1226
1227/* See uma.h */
1228void *
1229uma_zalloc_arg(uma_zone_t zone, void *udata, int wait)
1230{
1231 void *item;
1232 uma_cache_t cache;
1233 uma_bucket_t bucket;
1234 int cpu;
1235
1236 /* This is the fast path allocation */
1237#ifdef UMA_DEBUG_ALLOC_1
1238 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1239#endif
1240
1241zalloc_restart:
1242 cpu = PCPU_GET(cpuid);
1243 CPU_LOCK(zone, cpu);
1244 cache = &zone->uz_cpu[cpu];
1245
1246zalloc_start:
1247 bucket = cache->uc_allocbucket;
1248
1249 if (bucket) {
1250 if (bucket->ub_ptr > -1) {
1251 item = bucket->ub_bucket[bucket->ub_ptr];
1252#ifdef INVARIANTS
1253 bucket->ub_bucket[bucket->ub_ptr] = NULL;
1254#endif
1255 bucket->ub_ptr--;
1256 KASSERT(item != NULL,
1257 ("uma_zalloc: Bucket pointer mangled."));
1258 cache->uc_allocs++;
1259 CPU_UNLOCK(zone, cpu);
1260 if (zone->uz_ctor)
1261 zone->uz_ctor(item, zone->uz_size, udata);
1262 return (item);
1263 } else if (cache->uc_freebucket) {
1264 /*
1265 * We have run out of items in our allocbucket.
1266 * See if we can switch with our free bucket.
1267 */
1268 if (cache->uc_freebucket->ub_ptr > -1) {
1269 uma_bucket_t swap;
1270
1271#ifdef UMA_DEBUG_ALLOC
1272 printf("uma_zalloc: Swapping empty with alloc.\n");
1273#endif
1274 swap = cache->uc_freebucket;
1275 cache->uc_freebucket = cache->uc_allocbucket;
1276 cache->uc_allocbucket = swap;
1277
1278 goto zalloc_start;
1279 }
1280 }
1281 }
1282 ZONE_LOCK(zone);
1283 /* Since we have locked the zone we may as well send back our stats */
1284 zone->uz_allocs += cache->uc_allocs;
1285 cache->uc_allocs = 0;
1286
1287 /* Our old one is now a free bucket */
1288 if (cache->uc_allocbucket) {
1289 KASSERT(cache->uc_allocbucket->ub_ptr == -1,
1290 ("uma_zalloc_arg: Freeing a non free bucket."));
1291 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1292 cache->uc_allocbucket, ub_link);
1293 cache->uc_allocbucket = NULL;
1294 }
1295
1296 /* Check the free list for a new alloc bucket */
1297 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1298 KASSERT(bucket->ub_ptr != -1,
1299 ("uma_zalloc_arg: Returning an empty bucket."));
1300
1301 LIST_REMOVE(bucket, ub_link);
1302 cache->uc_allocbucket = bucket;
1303 ZONE_UNLOCK(zone);
1304 goto zalloc_start;
1305 }
1306 /* Bump up our uz_count so we get here less */
1307 if (zone->uz_count < UMA_BUCKET_SIZE - 1)
1308 zone->uz_count++;
1309
1310 /* We are no longer associated with this cpu!!! */
1311 CPU_UNLOCK(zone, cpu);
1312
1313 /*
1314	 * Now let's just fill a bucket and put it on the free list. If that
1315	 * works we'll restart the allocation from the beginning.
1316 *
1317 * Try this zone's free list first so we don't allocate extra buckets.
1318 */
1319
1320 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL)
1321 LIST_REMOVE(bucket, ub_link);
1322
1323 /* Now we no longer need the zone lock. */
1324 ZONE_UNLOCK(zone);
1325
1326 if (bucket == NULL)
1327 bucket = uma_zalloc_internal(bucketzone,
1328 NULL, wait, NULL);
1329
1330 if (bucket != NULL) {
1331#ifdef INVARIANTS
1332 bzero(bucket, bucketzone->uz_size);
1333#endif
1334 bucket->ub_ptr = -1;
1335
1336 if (uma_zalloc_internal(zone, udata, wait, bucket))
1337 goto zalloc_restart;
1338 else
1339 uma_zfree_internal(bucketzone, bucket, NULL, 0);
1340 }
1341 /*
1342 * We may not get a bucket if we recurse, so
1343 * return an actual item.
1344 */
1345#ifdef UMA_DEBUG
1346 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1347#endif
1348
1349 return (uma_zalloc_internal(zone, udata, wait, NULL));
1350}
1351
1352/*
1353 * Allocates an item for an internal zone OR fills a bucket
1354 *
1355 * Arguments
1356 * zone The zone to alloc for.
1357 * udata The data to be passed to the constructor.
1358 * wait M_WAITOK or M_NOWAIT.
1359 * bucket The bucket to fill or NULL
1360 *
1361 * Returns
1362 * NULL if there is no memory and M_NOWAIT is set
1363 * An item if called on an internal zone
1364 * Non NULL if called to fill a bucket and it was successful.
1365 *
1366 * Discussion:
1367 * This was much cleaner before it had to do per cpu caches. It is
1368 * complicated now because it has to handle the simple internal case, and
1369 * the more involved bucket filling and allocation.
1370 */
1371
1372static void *
1373uma_zalloc_internal(uma_zone_t zone, void *udata, int wait, uma_bucket_t bucket)
1374{
1375 uma_slab_t slab;
1376 u_int8_t freei;
1377 void *item;
1378
1379 item = NULL;
1380
1381 /*
1382 * This is to stop us from allocating per cpu buckets while we're
1383 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the
1384 * boot pages.
1385 */
1386
1387 if (!booted && zone == bucketzone)
1388 return (NULL);
1389
1390#ifdef UMA_DEBUG_ALLOC
1391 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
1392#endif
1393 ZONE_LOCK(zone);
1394
1395 /*
1396 * This code is here to limit the number of simultaneous bucket fills
1397 * for any given zone to the number of per cpu caches in this zone. This
1398 * is done so that we don't allocate more memory than we really need.
1399 */
1400
1401	if (bucket) {
1402#ifdef SMP
1403		if (zone->uz_fills >= mp_ncpus) {
1404#else
1405		if (zone->uz_fills > 1) {
1406#endif
			/* Don't return to the caller with the zone locked. */
			ZONE_UNLOCK(zone);
1407			return (NULL);
		}
1408
1409		zone->uz_fills++;
1410	}
1411
1412new_slab:
1413
1414 /* Find a slab with some space */
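	/*
	 * Partly used slabs are preferred here so that completely free
	 * slabs stay intact and remain candidates for release when the
	 * zone is drained.
	 */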
1415 if (zone->uz_free) {
1416 if (!LIST_EMPTY(&zone->uz_part_slab)) {
1417 slab = LIST_FIRST(&zone->uz_part_slab);
1418 } else {
1419 slab = LIST_FIRST(&zone->uz_free_slab);
1420 LIST_REMOVE(slab, us_link);
1421 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1422 }
1423 } else {
1424 /*
1425 * This is to prevent us from recursively trying to allocate
1426 * buckets. The problem is that if an allocation forces us to
1427 * grab a new bucket we will call page_alloc, which will go off
1428 * and cause the vm to allocate vm_map_entries. If we need new
1429 * buckets there too we will recurse in kmem_alloc and bad
1430 * things happen. So instead we return a NULL bucket, and make
1431 * the code that allocates buckets smart enough to deal with it */
1432 if (zone == bucketzone && zone->uz_recurse != 0) {
1433 ZONE_UNLOCK(zone);
1434 return (NULL);
1435 }
1436 zone->uz_recurse++;
1437 slab = slab_zalloc(zone, wait);
1438 zone->uz_recurse--;
1439 if (slab) {
1440 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1441 /*
1442 * We might not have been able to get a page, but another cpu
1443 * could have while we were unlocked.
1444 */
1445 } else if (zone->uz_free == 0) {
1446 /* If we're filling a bucket return what we have */
1447 if (bucket != NULL)
1448 zone->uz_fills--;
1449 ZONE_UNLOCK(zone);
1450
1451 if (bucket != NULL && bucket->ub_ptr != -1)
1452 return (bucket);
1453 else
1454 return (NULL);
1455 } else {
1456 /* Another cpu must have succeeded */
1457 if ((slab = LIST_FIRST(&zone->uz_part_slab)) == NULL) {
1458 slab = LIST_FIRST(&zone->uz_free_slab);
1459 LIST_REMOVE(slab, us_link);
1460 LIST_INSERT_HEAD(&zone->uz_part_slab,
1461 slab, us_link);
1462 }
1463 }
1464 }
1465 /*
1466	 * If this is our first time through, put this guy on the list.
1467 */
1468 if (bucket != NULL && bucket->ub_ptr == -1)
1469 LIST_INSERT_HEAD(&zone->uz_full_bucket,
1470 bucket, ub_link);
1471
1472
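	/*
	 * The per-slab free list is an array of item indices: us_firstfree
	 * is the first free item and us_freelist[i] holds the index of the
	 * free item after i, so popping an item is just two array reads.
	 * (Under INVARIANTS an allocated slot is marked with 255.)
	 */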
1473 while (slab->us_freecount) {
1474 freei = slab->us_firstfree;
1475 slab->us_firstfree = slab->us_freelist[freei];
1476#ifdef INVARIANTS
1477 slab->us_freelist[freei] = 255;
1478#endif
1479 slab->us_freecount--;
1480 zone->uz_free--;
1481 item = slab->us_data + (zone->uz_rsize * freei);
1482
1483 if (bucket == NULL) {
1484 zone->uz_allocs++;
1485 break;
1486 }
1487 bucket->ub_bucket[++bucket->ub_ptr] = item;
1488
1489 /* Don't overfill the bucket! */
1490 if (bucket->ub_ptr == zone->uz_count)
1491 break;
1492 }
1493
1494 /* Move this slab to the full list */
1495 if (slab->us_freecount == 0) {
1496 LIST_REMOVE(slab, us_link);
1497 LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
1498 }
1499
1500 if (bucket != NULL) {
1501 /* Try to keep the buckets totally full, but don't block */
1502 if (bucket->ub_ptr < zone->uz_count) {
1503 wait = M_NOWAIT;
1504 goto new_slab;
1505 } else
1506 zone->uz_fills--;
1507 }
1508
1509 ZONE_UNLOCK(zone);
1510
1511 /* Only construct at this time if we're not filling a bucket */
1512 if (bucket == NULL && zone->uz_ctor != NULL)
1513 zone->uz_ctor(item, zone->uz_size, udata);
1514
1515 return (item);
1516}
1517
1518/* See uma.h */
1519void
1520uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
1521{
1522 uma_cache_t cache;
1523 uma_bucket_t bucket;
1524 int cpu;
1525
1526 /* This is the fast path free */
1527#ifdef UMA_DEBUG_ALLOC_1
1528 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
1529#endif
1530zfree_restart:
1531 cpu = PCPU_GET(cpuid);
1532 CPU_LOCK(zone, cpu);
1533 cache = &zone->uz_cpu[cpu];
1534
1535zfree_start:
1536 bucket = cache->uc_freebucket;
1537
1538 if (bucket) {
1539 /*
1540 * Do we have room in our bucket? It is OK for this uz count
1541 * check to be slightly out of sync.
1542 */
1543
1544 if (bucket->ub_ptr < zone->uz_count) {
1545 bucket->ub_ptr++;
1546 KASSERT(bucket->ub_bucket[bucket->ub_ptr] == NULL,
1547 ("uma_zfree: Freeing to non free bucket index."));
1548 bucket->ub_bucket[bucket->ub_ptr] = item;
1549 CPU_UNLOCK(zone, cpu);
1550 if (zone->uz_dtor)
1551 zone->uz_dtor(item, zone->uz_size, udata);
1552 return;
1553 } else if (cache->uc_allocbucket) {
1554#ifdef UMA_DEBUG_ALLOC
1555 printf("uma_zfree: Swapping buckets.\n");
1556#endif
1557 /*
1558 * We have run out of space in our freebucket.
1559 * See if we can switch with our alloc bucket.
1560 */
1561 if (cache->uc_allocbucket->ub_ptr <
1562 cache->uc_freebucket->ub_ptr) {
1563 uma_bucket_t swap;
1564
1565 swap = cache->uc_freebucket;
1566 cache->uc_freebucket = cache->uc_allocbucket;
1567 cache->uc_allocbucket = swap;
1568
1569 goto zfree_start;
1570 }
1571 }
1572 }
1573
1574 /*
1575 * We can get here for two reasons:
1576 *
1577 * 1) The buckets are NULL
1578 * 2) The alloc and free buckets are both somewhat full.
1579 *
1580 */
1581
1582 ZONE_LOCK(zone);
1583
1584 bucket = cache->uc_freebucket;
1585 cache->uc_freebucket = NULL;
1586
1587 /* Can we throw this on the zone full list? */
1588 if (bucket != NULL) {
1589#ifdef UMA_DEBUG_ALLOC
1590 printf("uma_zfree: Putting old bucket on the free list.\n");
1591#endif
1592 /* ub_ptr is pointing to the last free item */
1593 KASSERT(bucket->ub_ptr != -1,
1594 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
1595 LIST_INSERT_HEAD(&zone->uz_full_bucket,
1596 bucket, ub_link);
1597 }
1598 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1599 LIST_REMOVE(bucket, ub_link);
1600 ZONE_UNLOCK(zone);
1601 cache->uc_freebucket = bucket;
1602 goto zfree_start;
1603 }
1604 /* We're done with this CPU now */
1605 CPU_UNLOCK(zone, cpu);
1606
1607 /* And the zone.. */
1608 ZONE_UNLOCK(zone);
1609
1610#ifdef UMA_DEBUG_ALLOC
1611 printf("uma_zfree: Allocating new free bucket.\n");
1612#endif
1613 bucket = uma_zalloc_internal(bucketzone,
1614 NULL, M_NOWAIT, NULL);
1615 if (bucket) {
1616#ifdef INVARIANTS
1617 bzero(bucket, bucketzone->uz_size);
1618#endif
1619 bucket->ub_ptr = -1;
1620 ZONE_LOCK(zone);
1621 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1622 bucket, ub_link);
1623 ZONE_UNLOCK(zone);
1624 goto zfree_restart;
1625 }
1626
1627 /*
1628 * If nothing else caught this, we'll just do an internal free.
1629 */
1630
1631 uma_zfree_internal(zone, item, udata, 0);
1632
1633 return;
1634
1635}
1636
1637/*
1638 * Frees an item to an INTERNAL zone or allocates a free bucket
1639 *
1640 * Arguments:
1641 * zone The zone to free to
1642 * item The item we're freeing
1643 * udata User supplied data for the dtor
1644 * skip Skip the dtor, it was done in uma_zfree_arg
1645 */
1646
1647static void
1648uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
1649{
1650 uma_slab_t slab;
1651 u_int8_t *mem;
1652 u_int8_t freei;
1653
1654 ZONE_LOCK(zone);
1655
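	/*
	 * Locate the slab header for this item: mask the address down to
	 * the start of its slab, then either look the slab up in the zone
	 * hash (off-page headers) or find the header at uz_pgoff inside the
	 * slab itself.  Malloc-backed zones pass the slab in through udata.
	 */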
1656 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
1657 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
1658 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
1659 slab = hash_sfind(&zone->uz_hash, mem);
1660 else {
1661 mem += zone->uz_pgoff;
1662 slab = (uma_slab_t)mem;
1663 }
1664 } else {
1665 slab = (uma_slab_t)udata;
1666 }
1667
1668 /* Do we need to remove from any lists? */
1669 if (slab->us_freecount+1 == zone->uz_ipers) {
1670 LIST_REMOVE(slab, us_link);
1671 LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
1672 } else if (slab->us_freecount == 0) {
1673 LIST_REMOVE(slab, us_link);
1674 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1675 }
1676
1677	/* Slab management: put the item back on the slab's free list */
1678 freei = ((unsigned long)item - (unsigned long)slab->us_data)
1679 / zone->uz_rsize;
1680#ifdef INVARIANTS
1681 if (((freei * zone->uz_rsize) + slab->us_data) != item)
1682 panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
1683 zone->uz_name, zone, slab, item);
1684 if (freei >= zone->uz_ipers)
1685 panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
1686 zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
1687
1688 if (slab->us_freelist[freei] != 255) {
1689 printf("Slab at %p, freei %d = %d.\n",
1690 slab, freei, slab->us_freelist[freei]);
1691 panic("Duplicate free of item %p from zone %p(%s)\n",
1692 item, zone, zone->uz_name);
1693 }
1694#endif
1695 slab->us_freelist[freei] = slab->us_firstfree;
1696 slab->us_firstfree = freei;
1697 slab->us_freecount++;
1698
1699 /* Zone statistics */
1700 zone->uz_free++;
1701
1702 ZONE_UNLOCK(zone);
1703
1704 if (!skip && zone->uz_dtor)
1705 zone->uz_dtor(item, zone->uz_size, udata);
1706}
1707
1708/* See uma.h */
1709void
1710uma_zone_set_max(uma_zone_t zone, int nitems)
1711{
1712 ZONE_LOCK(zone);
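	/*
	 * The limit is kept in pages.  With hypothetical numbers, a zone
	 * with uz_ipers = 50 capped at nitems = 1000 gets uz_maxpages = 20;
	 * because of the integer division the effective cap can fall
	 * slightly short of nitems when it isn't a multiple of uz_ipers.
	 */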
1713 if (zone->uz_ppera > 1)
1714 zone->uz_maxpages = nitems / zone->uz_ppera;
1715 else
1716 zone->uz_maxpages = nitems / zone->uz_ipers;
1717 ZONE_UNLOCK(zone);
1718}
1719
1720/* See uma.h */
1721void
1722uma_zone_set_freef(uma_zone_t zone, uma_free freef)
1723{
1724 ZONE_LOCK(zone);
1725
1726 zone->uz_freef = freef;
1727
1728 ZONE_UNLOCK(zone);
1729}
1730
1731/* See uma.h */
1732void
1733uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
1734{
1735 ZONE_LOCK(zone);
1736
1737 zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
1738 zone->uz_allocf = allocf;
1739
1740 ZONE_UNLOCK(zone);
1741}
1742
1743/* See uma.h */
1744int
1745uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
1746{
1747 int pages;
1748 vm_offset_t kva;
1749
1750 mtx_lock(&Giant);
1751
1752 pages = count / zone->uz_ipers;
1753
1754 if (pages * zone->uz_ipers < count)
1755 pages++;
1756
1757 kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
1758
1759 if (kva == 0) {
1760 mtx_unlock(&Giant);
1761 return (0);
1762 }
1763
1764
1765 if (obj == NULL)
1766 obj = vm_object_allocate(OBJT_DEFAULT,
1767 zone->uz_maxpages);
1768 else
1769 _vm_object_allocate(OBJT_DEFAULT,
1770 zone->uz_maxpages, obj);
1771
1772 ZONE_LOCK(zone);
1773 zone->uz_kva = kva;
1774 zone->uz_obj = obj;
1775 zone->uz_maxpages = pages;
1776
1777 zone->uz_allocf = obj_alloc;
1778 zone->uz_flags |= UMA_ZFLAG_NOFREE | UMA_ZFLAG_PRIVALLOC;
1779
1780 ZONE_UNLOCK(zone);
1781 mtx_unlock(&Giant);
1782
1783 return (1);
1784}
1785
1786/* See uma.h */
1787void
1788uma_prealloc(uma_zone_t zone, int items)
1789{
1790 int slabs;
1791 uma_slab_t slab;
1792
1793 ZONE_LOCK(zone);
1794 slabs = items / zone->uz_ipers;
1795 if (slabs * zone->uz_ipers < items)
1796 slabs++;
1797
1798 while (slabs > 0) {
1799 slab = slab_zalloc(zone, M_WAITOK);
1800 LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
1801 slabs--;
1802 }
1803 ZONE_UNLOCK(zone);
1804}
1805
1806/* See uma.h */
1807void
1808uma_reclaim(void)
1809{
1810 /*
1811 * You might think that the delay below would improve performance since
1812 * the allocator will give away memory that it may ask for immediately.
1813 * Really, it makes things worse, since cpu cycles are so much cheaper
1814 * than disk activity.
1815 */
1816#if 0
1817 static struct timeval tv = {0};
1818 struct timeval now;
1819 getmicrouptime(&now);
1820 if (now.tv_sec > tv.tv_sec + 30)
1821 tv = now;
1822 else
1823 return;
1824#endif
1825#ifdef UMA_DEBUG
1826 printf("UMA: vm asked us to release pages!\n");
1827#endif
1828 zone_foreach(zone_drain);
1829
1830 /*
1831	 * Some slabs may have been freed, but this zone was visited early in the
1832	 * pass above; visit it again so that pages freed up while draining the
1833	 * other zones can be released.  We have to do the same for buckets.
1834 */
1835 zone_drain(slabzone);
1836 zone_drain(bucketzone);
1837}
1838
1839void *
1840uma_large_malloc(int size, int wait)
1841{
1842 void *mem;
1843 uma_slab_t slab;
1844 u_int8_t flags;
1845
1846 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
1847 if (slab == NULL)
1848 return (NULL);
1849
1850 mem = page_alloc(NULL, size, &flags, wait);
1851 if (mem) {
1852 slab->us_data = mem;
1853 slab->us_flags = flags | UMA_SLAB_MALLOC;
1854 slab->us_size = size;
1855 UMA_HASH_INSERT(mallochash, slab, mem);
1856 } else {
1857 uma_zfree_internal(slabzone, slab, NULL, 0);
1858 }
1859
1860
1861 return (mem);
1862}
1863
1864void
1865uma_large_free(uma_slab_t slab)
1866{
1867 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
1868 page_free(slab->us_data, slab->us_size, slab->us_flags);
1869 uma_zfree_internal(slabzone, slab, NULL, 0);
1870}
1871
1872void
1873uma_print_stats(void)
1874{
1875 zone_foreach(uma_print_zone);
1876}
1877
1878void
1879uma_print_zone(uma_zone_t zone)
1880{
1881 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
1882 zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
1883 zone->uz_ipers, zone->uz_ppera,
1884 (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free);
1885}
1886
1887/*
1888 * Sysctl handler for vm.zone
1889 *
1890 * stolen from vm_zone.c
1891 */
1892static int
1893sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
1894{
1895 int error, len, cnt;
1896 const int linesize = 128; /* conservative */
1897 int totalfree;
1898 char *tmpbuf, *offset;
1899 uma_zone_t z;
1900 char *p;
1901
1902 cnt = 0;
1903 LIST_FOREACH(z, &uma_zones, uz_link)
1904 cnt++;
1905 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
1906 M_TEMP, M_WAITOK);
1907 len = snprintf(tmpbuf, linesize,
1908 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
1909 if (cnt == 0)
1910 tmpbuf[len - 1] = '\0';
1911 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
1912 if (error || cnt == 0)
1913 goto out;
1914 offset = tmpbuf;
1915 mtx_lock(&uma_mtx);
1916 LIST_FOREACH(z, &uma_zones, uz_link) {
1917 if (cnt == 0) /* list may have changed size */
1918 break;
1919 ZONE_LOCK(z);
1920 totalfree = z->uz_free + z->uz_cachefree;
1921 len = snprintf(offset, linesize,
1922 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
1923 z->uz_name, z->uz_size,
1924 z->uz_maxpages * z->uz_ipers,
1925 (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
1926 totalfree,
1927 (unsigned long long)z->uz_allocs);
1928 ZONE_UNLOCK(z);
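		/*
		 * The zone name above was blank-padded to 12 columns; walk
		 * back over the padding and drop a ':' in right after the
		 * last character of the name.
		 */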
1929 for (p = offset + 12; p > offset && *p == ' '; --p)
1930 /* nothing */ ;
1931 p[1] = ':';
1932 cnt--;
1933 offset += len;
1934 }
1935 mtx_unlock(&uma_mtx);
1936 *offset++ = '\0';
1937 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
1938out:
1939 FREE(tmpbuf, M_TEMP);
1940 return (error);
1941}