uma_core.c (r295222) vs. uma_core.c (r296243)
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * uma_core.c Implementation of the Universal Memory allocator
31 *
32 * This allocator is intended to replace the multitude of similar object caches
33 * in the standard FreeBSD kernel. The intent is to be flexible as well as
 34 * efficient. A primary design goal is to return unused memory to the rest of
35 * the system. This will make the system as a whole more flexible due to the
36 * ability to move memory to subsystems which most need it instead of leaving
37 * pools of reserved memory unused.
38 *
39 * The basic ideas stem from similar slab/zone based allocators whose algorithms
40 * are well known.
41 *
42 */
43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 295222 2016-02-03 23:30:17Z glebius $");
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 296243 2016-03-01 00:33:32Z glebius $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59
60#include "opt_ddb.h"
61#include "opt_param.h"
62#include "opt_vm.h"
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/bitset.h>
67#include <sys/kernel.h>
68#include <sys/types.h>
69#include <sys/queue.h>
70#include <sys/malloc.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/sysctl.h>
74#include <sys/mutex.h>
75#include <sys/proc.h>
76#include <sys/random.h>
77#include <sys/rwlock.h>
78#include <sys/sbuf.h>
79#include <sys/sched.h>
80#include <sys/smp.h>
81#include <sys/taskqueue.h>
82#include <sys/vmmeter.h>
83
84#include <vm/vm.h>
85#include <vm/vm_object.h>
86#include <vm/vm_page.h>
87#include <vm/vm_pageout.h>
88#include <vm/vm_param.h>
89#include <vm/vm_map.h>
90#include <vm/vm_kern.h>
91#include <vm/vm_extern.h>
92#include <vm/uma.h>
93#include <vm/uma_int.h>
94#include <vm/uma_dbg.h>
95
96#include <ddb/ddb.h>
97
98#ifdef DEBUG_MEMGUARD
99#include <vm/memguard.h>
100#endif
101
102/*
103 * This is the zone and keg from which all zones are spawned. The idea is that
104 * even the zone & keg heads are allocated from the allocator, so we use the
105 * bss section to bootstrap us.
106 */
107static struct uma_keg masterkeg;
108static struct uma_zone masterzone_k;
109static struct uma_zone masterzone_z;
110static uma_zone_t kegs = &masterzone_k;
111static uma_zone_t zones = &masterzone_z;
112
113/* This is the zone from which all of uma_slab_t's are allocated. */
114static uma_zone_t slabzone;
115static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
116
117/*
118 * The initial hash tables come out of this zone so they can be allocated
119 * prior to malloc coming up.
120 */
121static uma_zone_t hashzone;
122
123/* The boot-time adjusted value for cache line alignment. */
124int uma_align_cache = 64 - 1;
125
126static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
127
128/*
129 * Are we allowed to allocate buckets?
130 */
131static int bucketdisable = 1;
132
133/* Linked list of all kegs in the system */
134static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
135
136/* Linked list of all cache-only zones in the system */
137static LIST_HEAD(,uma_zone) uma_cachezones =
138 LIST_HEAD_INITIALIZER(uma_cachezones);
139
140/* This RW lock protects the keg list */
141static struct rwlock_padalign uma_rwlock;
142
143/* Linked list of boot time pages */
144static LIST_HEAD(,uma_slab) uma_boot_pages =
145 LIST_HEAD_INITIALIZER(uma_boot_pages);
146
147/* This mutex protects the boot time pages list */
148static struct mtx_padalign uma_boot_pages_mtx;
149
150static struct sx uma_drain_lock;
151
152/* Is the VM done starting up? */
153static int booted = 0;
154#define UMA_STARTUP 1
155#define UMA_STARTUP2 2
156
157/*
158 * Only mbuf clusters use ref zones. Just provide enough references
159 * to support the one user. New code should not use the ref facility.
160 */
161static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
162
163/*
164 * This is the handle used to schedule events that need to happen
165 * outside of the allocation fast path.
166 */
167static struct callout uma_callout;
168#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
169
170/*
171 * This structure is passed as the zone ctor arg so that I don't have to create
172 * a special allocation function just for zones.
173 */
174struct uma_zctor_args {
175 const char *name;
176 size_t size;
177 uma_ctor ctor;
178 uma_dtor dtor;
179 uma_init uminit;
180 uma_fini fini;
181 uma_import import;
182 uma_release release;
183 void *arg;
184 uma_keg_t keg;
185 int align;
186 uint32_t flags;
187};
188
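/*
 * For reference, these fields mirror the arguments a consumer hands to the
 * public uma_zcreate() interface.  A minimal consumer-side sketch follows;
 * the "foo" zone, struct foo and foo_ctor are hypothetical and exist only
 * to show how the pieces line up with uma_zctor_args above.
 */
#if 0
struct foo {
	int	f_state;
};

static uma_zone_t foo_zone;

static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *f = mem;

	f->f_state = 0;		/* Runs on every allocation from this zone. */
	return (0);
}

static void
foo_init(void *dummy)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    foo_ctor, NULL,	/* ctor, dtor */
	    NULL, NULL,		/* uminit, fini */
	    UMA_ALIGN_PTR, 0);	/* align, flags */
}
#endif
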
189struct uma_kctor_args {
190 uma_zone_t zone;
191 size_t size;
192 uma_init uminit;
193 uma_fini fini;
194 int align;
195 uint32_t flags;
196};
197
198struct uma_bucket_zone {
199 uma_zone_t ubz_zone;
200 char *ubz_name;
201 int ubz_entries; /* Number of items it can hold. */
202 int ubz_maxsize; /* Maximum allocation size per-item. */
203};
204
205/*
206 * Compute the actual number of bucket entries to pack them in power
207 * of two sizes for more efficient space utilization.
208 */
209#define BUCKET_SIZE(n) \
210 (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
211
212#define BUCKET_MAX BUCKET_SIZE(256)
213
214struct uma_bucket_zone bucket_zones[] = {
215 { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
216 { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
217 { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
218 { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
219 { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
220 { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
221 { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
222 { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
223 { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
224 { NULL, NULL, 0}
225};
226
227/*
228 * Flags and enumerations to be passed to internal functions.
229 */
230enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
231
232/* Prototypes.. */
233
234static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
235static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
236static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
237static void page_free(void *, vm_size_t, uint8_t);
238static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
239static void cache_drain(uma_zone_t);
240static void bucket_drain(uma_zone_t, uma_bucket_t);
241static void bucket_cache_drain(uma_zone_t zone);
242static int keg_ctor(void *, int, void *, int);
243static void keg_dtor(void *, int, void *);
244static int zone_ctor(void *, int, void *, int);
245static void zone_dtor(void *, int, void *);
246static int zero_init(void *, int, int);
247static void keg_small_init(uma_keg_t keg);
248static void keg_large_init(uma_keg_t keg);
249static void zone_foreach(void (*zfunc)(uma_zone_t));
250static void zone_timeout(uma_zone_t zone);
251static int hash_alloc(struct uma_hash *);
252static int hash_expand(struct uma_hash *, struct uma_hash *);
253static void hash_free(struct uma_hash *hash);
254static void uma_timeout(void *);
255static void uma_startup3(void);
256static void *zone_alloc_item(uma_zone_t, void *, int);
257static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
258static void bucket_enable(void);
259static void bucket_init(void);
260static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
261static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
262static void bucket_zone_drain(void);
263static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
264static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
265static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
266static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
267static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
268static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
269 uma_fini fini, int align, uint32_t flags);
270static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
271static void zone_release(uma_zone_t zone, void **bucket, int cnt);
272static void uma_zero_item(void *item, uma_zone_t zone);
273
274void uma_print_zone(uma_zone_t);
275void uma_print_stats(void);
276static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
277static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
278
279#ifdef INVARIANTS
280static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
281static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
282#endif
283
284SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
285
286SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
287 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
288
289SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
290 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
291
292static int zone_warnings = 1;
293SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
 294    "Warn when UMA zones become full");
295
296/*
297 * This routine checks to see whether or not it's safe to enable buckets.
298 */
299static void
300bucket_enable(void)
301{
302 bucketdisable = vm_page_count_min();
303}
304
305/*
306 * Initialize bucket_zones, the array of zones of buckets of various sizes.
307 *
308 * For each zone, calculate the memory required for each bucket, consisting
309 * of the header and an array of pointers.
310 */
311static void
312bucket_init(void)
313{
314 struct uma_bucket_zone *ubz;
315 int size;
316
317 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
318 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
319 size += sizeof(void *) * ubz->ubz_entries;
320 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
321 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
322 UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
323 }
324}
325
326/*
327 * Given a desired number of entries for a bucket, return the zone from which
328 * to allocate the bucket.
329 */
330static struct uma_bucket_zone *
331bucket_zone_lookup(int entries)
332{
333 struct uma_bucket_zone *ubz;
334
335 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
336 if (ubz->ubz_entries >= entries)
337 return (ubz);
338 ubz--;
339 return (ubz);
340}
341
342static int
343bucket_select(int size)
344{
345 struct uma_bucket_zone *ubz;
346
347 ubz = &bucket_zones[0];
348 if (size > ubz->ubz_maxsize)
349 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
350
351 for (; ubz->ubz_entries != 0; ubz++)
352 if (ubz->ubz_maxsize < size)
353 break;
354 ubz--;
355 return (ubz->ubz_entries);
356}
357
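/*
 * Worked example against the bucket_zones[] table above: for a zone whose
 * items are 1024 bytes, bucket_select(1024) walks forward until it hits the
 * first class whose ubz_maxsize is below 1024 ("32 Bucket", maxsize 512),
 * backs up one entry, and so returns the "16 Bucket" entry count.  Items
 * larger than the largest ubz_maxsize instead get a count scaled down from
 * the first class, clamped to at least one item per bucket.
 */
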
358static uma_bucket_t
359bucket_alloc(uma_zone_t zone, void *udata, int flags)
360{
361 struct uma_bucket_zone *ubz;
362 uma_bucket_t bucket;
363
364 /*
365 * This is to stop us from allocating per cpu buckets while we're
366 * running out of vm.boot_pages. Otherwise, we would exhaust the
367 * boot pages. This also prevents us from allocating buckets in
368 * low memory situations.
369 */
370 if (bucketdisable)
371 return (NULL);
372 /*
373 * To limit bucket recursion we store the original zone flags
374 * in a cookie passed via zalloc_arg/zfree_arg. This allows the
375 * NOVM flag to persist even through deep recursions. We also
376 * store ZFLAG_BUCKET once we have recursed attempting to allocate
377 * a bucket for a bucket zone so we do not allow infinite bucket
378 * recursion. This cookie will even persist to frees of unused
379 * buckets via the allocation path or bucket allocations in the
380 * free path.
381 */
382 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
383 udata = (void *)(uintptr_t)zone->uz_flags;
384 else {
385 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
386 return (NULL);
387 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
388 }
389 if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
390 flags |= M_NOVM;
391 ubz = bucket_zone_lookup(zone->uz_count);
392 if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
393 ubz++;
394 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
395 if (bucket) {
396#ifdef INVARIANTS
397 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
398#endif
399 bucket->ub_cnt = 0;
400 bucket->ub_entries = ubz->ubz_entries;
401 }
402
403 return (bucket);
404}
405
406static void
407bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
408{
409 struct uma_bucket_zone *ubz;
410
411 KASSERT(bucket->ub_cnt == 0,
412 ("bucket_free: Freeing a non free bucket."));
413 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
414 udata = (void *)(uintptr_t)zone->uz_flags;
415 ubz = bucket_zone_lookup(bucket->ub_entries);
416 uma_zfree_arg(ubz->ubz_zone, bucket, udata);
417}
418
419static void
420bucket_zone_drain(void)
421{
422 struct uma_bucket_zone *ubz;
423
424 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
425 zone_drain(ubz->ubz_zone);
426}
427
428static void
429zone_log_warning(uma_zone_t zone)
430{
431 static const struct timeval warninterval = { 300, 0 };
432
433 if (!zone_warnings || zone->uz_warning == NULL)
434 return;
435
436 if (ratecheck(&zone->uz_ratecheck, &warninterval))
437 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
438}
439
440static inline void
441zone_maxaction(uma_zone_t zone)
442{
443
444 if (zone->uz_maxaction.ta_func != NULL)
445 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
446}
447
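/*
 * The warning logged above only fires for zones whose consumer armed it.
 * A brief sketch of doing so, reusing the hypothetical foo_zone from the
 * uma_zcreate() example earlier:
 */
#if 0
	uma_zone_set_max(foo_zone, 10000);	/* Hard cap on items. */
	uma_zone_set_warning(foo_zone, "out of foo objects");
#endif
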
448static void
449zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
450{
451 uma_klink_t klink;
452
453 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
454 kegfn(klink->kl_keg);
455}
456
457/*
458 * Routine called by timeout which is used to fire off some time interval
459 * based calculations. (stats, hash size, etc.)
460 *
461 * Arguments:
462 * arg Unused
463 *
464 * Returns:
465 * Nothing
466 */
467static void
468uma_timeout(void *unused)
469{
470 bucket_enable();
471 zone_foreach(zone_timeout);
472
473 /* Reschedule this event */
474 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
475}
476
477/*
478 * Routine to perform timeout driven calculations. This expands the
479 * hashes and does per cpu statistics aggregation.
480 *
481 * Returns nothing.
482 */
483static void
484keg_timeout(uma_keg_t keg)
485{
486
487 KEG_LOCK(keg);
488 /*
489 * Expand the keg hash table.
490 *
491 * This is done if the number of slabs is larger than the hash size.
492 * What I'm trying to do here is completely reduce collisions. This
493 * may be a little aggressive. Should I allow for two collisions max?
494 */
495 if (keg->uk_flags & UMA_ZONE_HASH &&
496 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
497 struct uma_hash newhash;
498 struct uma_hash oldhash;
499 int ret;
500
501 /*
502 * This is so involved because allocating and freeing
503 * while the keg lock is held will lead to deadlock.
504 * I have to do everything in stages and check for
505 * races.
506 */
507 newhash = keg->uk_hash;
508 KEG_UNLOCK(keg);
509 ret = hash_alloc(&newhash);
510 KEG_LOCK(keg);
511 if (ret) {
512 if (hash_expand(&keg->uk_hash, &newhash)) {
513 oldhash = keg->uk_hash;
514 keg->uk_hash = newhash;
515 } else
516 oldhash = newhash;
517
518 KEG_UNLOCK(keg);
519 hash_free(&oldhash);
520 return;
521 }
522 }
523 KEG_UNLOCK(keg);
524}
525
526static void
527zone_timeout(uma_zone_t zone)
528{
529
530 zone_foreach_keg(zone, &keg_timeout);
531}
532
533/*
534 * Allocate and zero fill the next sized hash table from the appropriate
535 * backing store.
536 *
537 * Arguments:
538 * hash A new hash structure with the old hash size in uh_hashsize
539 *
540 * Returns:
 541 * 1 on success and 0 on failure.
542 */
543static int
544hash_alloc(struct uma_hash *hash)
545{
546 int oldsize;
547 int alloc;
548
549 oldsize = hash->uh_hashsize;
550
551 /* We're just going to go to a power of two greater */
552 if (oldsize) {
553 hash->uh_hashsize = oldsize * 2;
554 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
555 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
556 M_UMAHASH, M_NOWAIT);
557 } else {
558 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
559 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
560 M_WAITOK);
561 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
562 }
563 if (hash->uh_slab_hash) {
564 bzero(hash->uh_slab_hash, alloc);
565 hash->uh_hashmask = hash->uh_hashsize - 1;
566 return (1);
567 }
568
569 return (0);
570}
571
572/*
573 * Expands the hash table for HASH zones. This is done from zone_timeout
574 * to reduce collisions. This must not be done in the regular allocation
575 * path, otherwise, we can recurse on the vm while allocating pages.
576 *
577 * Arguments:
578 * oldhash The hash you want to expand
579 * newhash The hash structure for the new table
580 *
581 * Returns:
582 * Nothing
583 *
584 * Discussion:
585 */
586static int
587hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
588{
589 uma_slab_t slab;
590 int hval;
591 int i;
592
593 if (!newhash->uh_slab_hash)
594 return (0);
595
596 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
597 return (0);
598
599 /*
600 * I need to investigate hash algorithms for resizing without a
601 * full rehash.
602 */
603
604 for (i = 0; i < oldhash->uh_hashsize; i++)
605 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
606 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
607 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
608 hval = UMA_HASH(newhash, slab->us_data);
609 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
610 slab, us_hlink);
611 }
612
613 return (1);
614}
615
616/*
617 * Free the hash bucket to the appropriate backing store.
618 *
619 * Arguments:
 620 * hash The hash structure whose slab hash is being freed; its
 621 * uh_hashsize determines which backing store it came from.
622 *
623 * Returns:
624 * Nothing
625 */
626static void
627hash_free(struct uma_hash *hash)
628{
629 if (hash->uh_slab_hash == NULL)
630 return;
631 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
632 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
633 else
634 free(hash->uh_slab_hash, M_UMAHASH);
635}
636
637/*
638 * Frees all outstanding items in a bucket
639 *
640 * Arguments:
641 * zone The zone to free to, must be unlocked.
642 * bucket The free/alloc bucket with items, cpu queue must be locked.
643 *
644 * Returns:
645 * Nothing
646 */
647
648static void
649bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
650{
651 int i;
652
653 if (bucket == NULL)
654 return;
655
656 if (zone->uz_fini)
657 for (i = 0; i < bucket->ub_cnt; i++)
658 zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
659 zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
660 bucket->ub_cnt = 0;
661}
662
663/*
664 * Drains the per cpu caches for a zone.
665 *
 666 * NOTE: This may only be called while the zone is being torn down, and not
667 * during normal operation. This is necessary in order that we do not have
668 * to migrate CPUs to drain the per-CPU caches.
669 *
670 * Arguments:
671 * zone The zone to drain, must be unlocked.
672 *
673 * Returns:
674 * Nothing
675 */
676static void
677cache_drain(uma_zone_t zone)
678{
679 uma_cache_t cache;
680 int cpu;
681
682 /*
683 * XXX: It is safe to not lock the per-CPU caches, because we're
684 * tearing down the zone anyway. I.e., there will be no further use
685 * of the caches at this point.
686 *
 687 * XXX: It would be good to be able to assert that the zone is being
688 * torn down to prevent improper use of cache_drain().
689 *
690 * XXX: We lock the zone before passing into bucket_cache_drain() as
691 * it is used elsewhere. Should the tear-down path be made special
692 * there in some form?
693 */
694 CPU_FOREACH(cpu) {
695 cache = &zone->uz_cpu[cpu];
696 bucket_drain(zone, cache->uc_allocbucket);
697 bucket_drain(zone, cache->uc_freebucket);
698 if (cache->uc_allocbucket != NULL)
699 bucket_free(zone, cache->uc_allocbucket, NULL);
700 if (cache->uc_freebucket != NULL)
701 bucket_free(zone, cache->uc_freebucket, NULL);
702 cache->uc_allocbucket = cache->uc_freebucket = NULL;
703 }
704 ZONE_LOCK(zone);
705 bucket_cache_drain(zone);
706 ZONE_UNLOCK(zone);
707}
708
709static void
710cache_shrink(uma_zone_t zone)
711{
712
713 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
714 return;
715
716 ZONE_LOCK(zone);
717 zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
718 ZONE_UNLOCK(zone);
719}
720
721static void
722cache_drain_safe_cpu(uma_zone_t zone)
723{
724 uma_cache_t cache;
725 uma_bucket_t b1, b2;
726
727 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
728 return;
729
730 b1 = b2 = NULL;
731 ZONE_LOCK(zone);
732 critical_enter();
733 cache = &zone->uz_cpu[curcpu];
734 if (cache->uc_allocbucket) {
735 if (cache->uc_allocbucket->ub_cnt != 0)
736 LIST_INSERT_HEAD(&zone->uz_buckets,
737 cache->uc_allocbucket, ub_link);
738 else
739 b1 = cache->uc_allocbucket;
740 cache->uc_allocbucket = NULL;
741 }
742 if (cache->uc_freebucket) {
743 if (cache->uc_freebucket->ub_cnt != 0)
744 LIST_INSERT_HEAD(&zone->uz_buckets,
745 cache->uc_freebucket, ub_link);
746 else
747 b2 = cache->uc_freebucket;
748 cache->uc_freebucket = NULL;
749 }
750 critical_exit();
751 ZONE_UNLOCK(zone);
752 if (b1)
753 bucket_free(zone, b1, NULL);
754 if (b2)
755 bucket_free(zone, b2, NULL);
756}
757
758/*
759 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
760 * This is an expensive call because it needs to bind to all CPUs
761 * one by one and enter a critical section on each of them in order
762 * to safely access their cache buckets.
 763 * Zone lock must not be held when calling this function.
764 */
765static void
766cache_drain_safe(uma_zone_t zone)
767{
768 int cpu;
769
770 /*
 771 * Polite bucket size shrinking was not enough; shrink aggressively.
772 */
773 if (zone)
774 cache_shrink(zone);
775 else
776 zone_foreach(cache_shrink);
777
778 CPU_FOREACH(cpu) {
779 thread_lock(curthread);
780 sched_bind(curthread, cpu);
781 thread_unlock(curthread);
782
783 if (zone)
784 cache_drain_safe_cpu(zone);
785 else
786 zone_foreach(cache_drain_safe_cpu);
787 }
788 thread_lock(curthread);
789 sched_unbind(curthread);
790 thread_unlock(curthread);
791}
792
793/*
794 * Drain the cached buckets from a zone. Expects a locked zone on entry.
795 */
796static void
797bucket_cache_drain(uma_zone_t zone)
798{
799 uma_bucket_t bucket;
800
801 /*
802 * Drain the bucket queues and free the buckets, we just keep two per
803 * cpu (alloc/free).
804 */
805 while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
806 LIST_REMOVE(bucket, ub_link);
807 ZONE_UNLOCK(zone);
808 bucket_drain(zone, bucket);
809 bucket_free(zone, bucket, NULL);
810 ZONE_LOCK(zone);
811 }
812
813 /*
 814 * Shrink further bucket sizes. The price of a single zone lock collision
 815 * is probably lower than the price of a global cache drain.
816 */
817 if (zone->uz_count > zone->uz_count_min)
818 zone->uz_count--;
819}
820
821static void
822keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
823{
824 uint8_t *mem;
825 int i;
826 uint8_t flags;
827
828 mem = slab->us_data;
829 flags = slab->us_flags;
830 i = start;
831 if (keg->uk_fini != NULL) {
832 for (i--; i > -1; i--)
833 keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
834 keg->uk_size);
835 }
836 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
837 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
838#ifdef UMA_DEBUG
839 printf("%s: Returning %d bytes.\n", keg->uk_name,
840 PAGE_SIZE * keg->uk_ppera);
841#endif
842 keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
843}
844
845/*
846 * Frees pages from a keg back to the system. This is done on demand from
847 * the pageout daemon.
848 *
849 * Returns nothing.
850 */
851static void
852keg_drain(uma_keg_t keg)
853{
854 struct slabhead freeslabs = { 0 };
855 uma_slab_t slab;
856 uma_slab_t n;
857
858 /*
859 * We don't want to take pages from statically allocated kegs at this
860 * time
861 */
862 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
863 return;
864
865#ifdef UMA_DEBUG
866 printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
867#endif
868 KEG_LOCK(keg);
869 if (keg->uk_free == 0)
870 goto finished;
871
872 slab = LIST_FIRST(&keg->uk_free_slab);
873 while (slab) {
874 n = LIST_NEXT(slab, us_link);
875
 876 /* We have nowhere to free these to. */
877 if (slab->us_flags & UMA_SLAB_BOOT) {
878 slab = n;
879 continue;
880 }
881
882 LIST_REMOVE(slab, us_link);
883 keg->uk_pages -= keg->uk_ppera;
884 keg->uk_free -= keg->uk_ipers;
885
886 if (keg->uk_flags & UMA_ZONE_HASH)
887 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
888
889 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
890
891 slab = n;
892 }
893finished:
894 KEG_UNLOCK(keg);
895
896 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
897 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
898 keg_free_slab(keg, slab, keg->uk_ipers);
899 }
900}
901
902static void
903zone_drain_wait(uma_zone_t zone, int waitok)
904{
905
906 /*
907 * Set draining to interlock with zone_dtor() so we can release our
908 * locks as we go. Only dtor() should do a WAITOK call since it
909 * is the only call that knows the structure will still be available
910 * when it wakes up.
911 */
912 ZONE_LOCK(zone);
913 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
914 if (waitok == M_NOWAIT)
915 goto out;
916 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
917 }
918 zone->uz_flags |= UMA_ZFLAG_DRAINING;
919 bucket_cache_drain(zone);
920 ZONE_UNLOCK(zone);
921 /*
922 * The DRAINING flag protects us from being freed while
923 * we're running. Normally the uma_rwlock would protect us but we
924 * must be able to release and acquire the right lock for each keg.
925 */
926 zone_foreach_keg(zone, &keg_drain);
927 ZONE_LOCK(zone);
928 zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
929 wakeup(zone);
930out:
931 ZONE_UNLOCK(zone);
932}
933
934void
935zone_drain(uma_zone_t zone)
936{
937
938 zone_drain_wait(zone, M_NOWAIT);
939}
940
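/*
 * zone_drain() and keg_drain() are internal to UMA; other subsystems give
 * cached memory back through the global uma_reclaim(), which the pageout
 * path invokes under memory pressure.  A sketch of a manual trigger from a
 * hypothetical maintenance path:
 */
#if 0
	if (vm_page_count_severe())
		uma_reclaim();	/* Return cached buckets and free slabs. */
#endif
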
941/*
942 * Allocate a new slab for a keg. This does not insert the slab onto a list.
943 *
944 * Arguments:
945 * wait Shall we wait?
946 *
947 * Returns:
948 * The slab that was allocated or NULL if there is no memory and the
949 * caller specified M_NOWAIT.
950 */
951static uma_slab_t
952keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
953{
954 uma_slabrefcnt_t slabref;
955 uma_alloc allocf;
956 uma_slab_t slab;
957 uint8_t *mem;
958 uint8_t flags;
959 int i;
960
961 mtx_assert(&keg->uk_lock, MA_OWNED);
962 slab = NULL;
963 mem = NULL;
964
965#ifdef UMA_DEBUG
966 printf("alloc_slab: Allocating a new slab for %s\n", keg->uk_name);
967#endif
968 allocf = keg->uk_allocf;
969 KEG_UNLOCK(keg);
970
971 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
972 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
973 if (slab == NULL)
974 goto out;
975 }
976
977 /*
978 * This reproduces the old vm_zone behavior of zero filling pages the
979 * first time they are added to a zone.
980 *
981 * Malloced items are zeroed in uma_zalloc.
982 */
983
984 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
985 wait |= M_ZERO;
986 else
987 wait &= ~M_ZERO;
988
989 if (keg->uk_flags & UMA_ZONE_NODUMP)
990 wait |= M_NODUMP;
991
992 /* zone is passed for legacy reasons. */
993 mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
994 if (mem == NULL) {
995 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
996 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
997 slab = NULL;
998 goto out;
999 }
1000
1001 /* Point the slab into the allocated memory */
1002 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1003 slab = (uma_slab_t )(mem + keg->uk_pgoff);
1004
1005 if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1006 for (i = 0; i < keg->uk_ppera; i++)
1007 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1008
1009 slab->us_keg = keg;
1010 slab->us_data = mem;
1011 slab->us_freecount = keg->uk_ipers;
1012 slab->us_flags = flags;
1013 BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1014#ifdef INVARIANTS
1015 BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1016#endif
1017 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1018 slabref = (uma_slabrefcnt_t)slab;
1019 for (i = 0; i < keg->uk_ipers; i++)
1020 slabref->us_refcnt[i] = 0;
1021 }
1022
1023 if (keg->uk_init != NULL) {
1024 for (i = 0; i < keg->uk_ipers; i++)
1025 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1026 keg->uk_size, wait) != 0)
1027 break;
1028 if (i != keg->uk_ipers) {
1029 keg_free_slab(keg, slab, i);
1030 slab = NULL;
1031 goto out;
1032 }
1033 }
1034out:
1035 KEG_LOCK(keg);
1036
1037 if (slab != NULL) {
1038 if (keg->uk_flags & UMA_ZONE_HASH)
1039 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1040
1041 keg->uk_pages += keg->uk_ppera;
1042 keg->uk_free += keg->uk_ipers;
1043 }
1044
1045 return (slab);
1046}
1047
1048/*
1049 * This function is intended to be used early on in place of page_alloc() so
1050 * that we may use the boot time page cache to satisfy allocations before
1051 * the VM is ready.
1052 */
1053static void *
1054startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1055{
1056 uma_keg_t keg;
1057 uma_slab_t tmps;
1058 int pages, check_pages;
1059
1060 keg = zone_first_keg(zone);
1061 pages = howmany(bytes, PAGE_SIZE);
1062 check_pages = pages - 1;
1063 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1064
1065 /*
1066 * Check our small startup cache to see if it has pages remaining.
1067 */
1068 mtx_lock(&uma_boot_pages_mtx);
1069
1070 /* First check if we have enough room. */
1071 tmps = LIST_FIRST(&uma_boot_pages);
1072 while (tmps != NULL && check_pages-- > 0)
1073 tmps = LIST_NEXT(tmps, us_link);
1074 if (tmps != NULL) {
1075 /*
1076 * It's ok to lose tmps references. The last one will
1077 * have tmps->us_data pointing to the start address of
1078 * "pages" contiguous pages of memory.
1079 */
1080 while (pages-- > 0) {
1081 tmps = LIST_FIRST(&uma_boot_pages);
1082 LIST_REMOVE(tmps, us_link);
1083 }
1084 mtx_unlock(&uma_boot_pages_mtx);
1085 *pflag = tmps->us_flags;
1086 return (tmps->us_data);
1087 }
1088 mtx_unlock(&uma_boot_pages_mtx);
1089 if (booted < UMA_STARTUP2)
1090 panic("UMA: Increase vm.boot_pages");
1091 /*
1092	 * Now that we've booted, reset these users to their real allocator.
1093 */
1094#ifdef UMA_MD_SMALL_ALLOC
1095 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1096#else
1097 keg->uk_allocf = page_alloc;
1098#endif
1099 return keg->uk_allocf(zone, bytes, pflag, wait);
1100}
1101
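A note on the list walk in startup_alloc() above, grounded in how uma_startup() fills the boot list later in this file: the boot slabs are carved out of one contiguous bootmem block and pushed with LIST_INSERT_HEAD, so the list runs from the highest-addressed page down to the lowest. startup_alloc() therefore first walks pages - 1 links to make sure enough entries remain, then pops pages entries from the head; the last one popped is the lowest-addressed of the run, which is why its us_data can be returned as the start of pages contiguous pages.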
1102/*
1103 * Allocates a number of pages from the system
1104 *
1105 * Arguments:
1106 * bytes The number of bytes requested
1107 * wait Shall we wait?
1108 *
1109 * Returns:
1110 * A pointer to the alloced memory or possibly
1111 * NULL if M_NOWAIT is set.
1112 */
1113static void *
1114page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1115{
1116 void *p; /* Returned page */
1117
1118 *pflag = UMA_SLAB_KMEM;
1119 p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1120
1121 return (p);
1122}
1123
1124/*
1125 * Allocates a number of pages not belonging to a VM object
1126 *
1127 * Arguments:
1128 * bytes The number of bytes requested
1129 * wait Shall we wait?
1130 *
1131 * Returns:
1132 * A pointer to the alloced memory or possibly
1133 * NULL if M_NOWAIT is set.
1134 */
1135static void *
1136noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
1137{
1138 TAILQ_HEAD(, vm_page) alloctail;
1139 u_long npages;
1140 vm_offset_t retkva, zkva;
1141 vm_page_t p, p_next;
1142 uma_keg_t keg;
1143
1144 TAILQ_INIT(&alloctail);
1145 keg = zone_first_keg(zone);
1146
1147 npages = howmany(bytes, PAGE_SIZE);
1148 while (npages > 0) {
1149 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1150 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1151 if (p != NULL) {
1152 /*
1153 * Since the page does not belong to an object, its
1154 * listq is unused.
1155 */
1156 TAILQ_INSERT_TAIL(&alloctail, p, listq);
1157 npages--;
1158 continue;
1159 }
1160 if (wait & M_WAITOK) {
1161 VM_WAIT;
1162 continue;
1163 }
1164
1165 /*
1166 * Page allocation failed, free intermediate pages and
1167 * exit.
1168 */
1169 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1170 vm_page_unwire(p, PQ_NONE);
1171 vm_page_free(p);
1172 }
1173 return (NULL);
1174 }
1175 *flags = UMA_SLAB_PRIV;
1176 zkva = keg->uk_kva +
1177 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1178 retkva = zkva;
1179 TAILQ_FOREACH(p, &alloctail, listq) {
1180 pmap_qenter(zkva, &p, 1);
1181 zkva += PAGE_SIZE;
1182 }
1183
1184 return ((void *)retkva);
1185}
1186
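Design note on noobj_alloc() above: the pages are allocated wired and without a backing VM object, then mapped one at a time with pmap_qenter() into a slice of the keg's pre-reserved KVA window (uk_kva). The atomic_fetchadd_long() on uk_offset lets concurrent callers carve out non-overlapping slices of that window without holding the keg lock.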
1187/*
1188 * Frees a number of pages to the system
1189 *
1190 * Arguments:
1191 * mem A pointer to the memory to be freed
1192 * size The size of the memory being freed
1193 * flags The original p->us_flags field
1194 *
1195 * Returns:
1196 * Nothing
1197 */
1198static void
1199page_free(void *mem, vm_size_t size, uint8_t flags)
1200{
1201 struct vmem *vmem;
1202
1203 if (flags & UMA_SLAB_KMEM)
1204 vmem = kmem_arena;
1205 else if (flags & UMA_SLAB_KERNEL)
1206 vmem = kernel_arena;
1207 else
1208 panic("UMA: page_free used with invalid flags %d", flags);
1209
1210 kmem_free(vmem, (vm_offset_t)mem, size);
1211}
1212
1213/*
1214 * Zero fill initializer
1215 *
1216 * Arguments/Returns follow uma_init specifications
1217 */
1218static int
1219zero_init(void *mem, int size, int flags)
1220{
1221 bzero(mem, size);
1222 return (0);
1223}
1224
1225/*
1226 * Finish creating a small uma keg. This calculates ipers, and the keg size.
1227 *
1228 * Arguments
1229 * keg The keg we should initialize
1230 *
1231 * Returns
1232 * Nothing
1233 */
1234static void
1235keg_small_init(uma_keg_t keg)
1236{
1237 u_int rsize;
1238 u_int memused;
1239 u_int wastedspace;
1240 u_int shsize;
1241
1242 if (keg->uk_flags & UMA_ZONE_PCPU) {
1243 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1244
1245 keg->uk_slabsize = sizeof(struct pcpu);
1246 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1247 PAGE_SIZE);
1248 } else {
1249 keg->uk_slabsize = UMA_SLAB_SIZE;
1250 keg->uk_ppera = 1;
1251 }
1252
1253 /*
1254 * Calculate the size of each allocation (rsize) according to
1255 * alignment. If the requested size is smaller than we have
1256 * allocation bits for we round it up.
1257 */
1258 rsize = keg->uk_size;
1259 if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1260 rsize = keg->uk_slabsize / SLAB_SETSIZE;
1261 if (rsize & keg->uk_align)
1262 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1263 keg->uk_rsize = rsize;
1264
1265 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1266 keg->uk_rsize < sizeof(struct pcpu),
1267 ("%s: size %u too large", __func__, keg->uk_rsize));
1268
1009
1010 if (keg->uk_init != NULL) {
1011 for (i = 0; i < keg->uk_ipers; i++)
1012 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1013 keg->uk_size, wait) != 0)
1014 break;
1015 if (i != keg->uk_ipers) {
1016 keg_free_slab(keg, slab, i);
1017 slab = NULL;
1018 goto out;
1019 }
1020 }
1021out:
1022 KEG_LOCK(keg);
1023
1024 if (slab != NULL) {
1025 if (keg->uk_flags & UMA_ZONE_HASH)
1026 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1027
1028 keg->uk_pages += keg->uk_ppera;
1029 keg->uk_free += keg->uk_ipers;
1030 }
1031
1032 return (slab);
1033}
1034
1035/*
1036 * This function is intended to be used early on in place of page_alloc() so
1037 * that we may use the boot time page cache to satisfy allocations before
1038 * the VM is ready.
1039 */
1040static void *
1041startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1042{
1043 uma_keg_t keg;
1044 uma_slab_t tmps;
1045 int pages, check_pages;
1046
1047 keg = zone_first_keg(zone);
1048 pages = howmany(bytes, PAGE_SIZE);
1049 check_pages = pages - 1;
1050 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1051
1052 /*
1053 * Check our small startup cache to see if it has pages remaining.
1054 */
1055 mtx_lock(&uma_boot_pages_mtx);
1056
1057 /* First check if we have enough room. */
1058 tmps = LIST_FIRST(&uma_boot_pages);
1059 while (tmps != NULL && check_pages-- > 0)
1060 tmps = LIST_NEXT(tmps, us_link);
1061 if (tmps != NULL) {
1062 /*
1063 * It's ok to lose tmps references. The last one will
1064 * have tmps->us_data pointing to the start address of
1065 * "pages" contiguous pages of memory.
1066 */
1067 while (pages-- > 0) {
1068 tmps = LIST_FIRST(&uma_boot_pages);
1069 LIST_REMOVE(tmps, us_link);
1070 }
1071 mtx_unlock(&uma_boot_pages_mtx);
1072 *pflag = tmps->us_flags;
1073 return (tmps->us_data);
1074 }
1075 mtx_unlock(&uma_boot_pages_mtx);
1076 if (booted < UMA_STARTUP2)
1077 panic("UMA: Increase vm.boot_pages");
1078 /*
1079	 * Now that we've booted, reset these users to their real allocator.
1080 */
1081#ifdef UMA_MD_SMALL_ALLOC
1082 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1083#else
1084 keg->uk_allocf = page_alloc;
1085#endif
1086 return keg->uk_allocf(zone, bytes, pflag, wait);
1087}
1088
1089/*
1090 * Allocates a number of pages from the system
1091 *
1092 * Arguments:
1093 * bytes The number of bytes requested
1094 * wait Shall we wait?
1095 *
1096 * Returns:
1097 * A pointer to the alloced memory or possibly
1098 * NULL if M_NOWAIT is set.
1099 */
1100static void *
1101page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1102{
1103 void *p; /* Returned page */
1104
1105 *pflag = UMA_SLAB_KMEM;
1106 p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1107
1108 return (p);
1109}
1110
1111/*
1112 * Allocates a number of pages not belonging to a VM object
1113 *
1114 * Arguments:
1115 * bytes The number of bytes requested
1116 * wait Shall we wait?
1117 *
1118 * Returns:
1119 * A pointer to the alloced memory or possibly
1120 * NULL if M_NOWAIT is set.
1121 */
1122static void *
1123noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
1124{
1125 TAILQ_HEAD(, vm_page) alloctail;
1126 u_long npages;
1127 vm_offset_t retkva, zkva;
1128 vm_page_t p, p_next;
1129 uma_keg_t keg;
1130
1131 TAILQ_INIT(&alloctail);
1132 keg = zone_first_keg(zone);
1133
1134 npages = howmany(bytes, PAGE_SIZE);
1135 while (npages > 0) {
1136 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1137 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1138 if (p != NULL) {
1139 /*
1140 * Since the page does not belong to an object, its
1141 * listq is unused.
1142 */
1143 TAILQ_INSERT_TAIL(&alloctail, p, listq);
1144 npages--;
1145 continue;
1146 }
1147 if (wait & M_WAITOK) {
1148 VM_WAIT;
1149 continue;
1150 }
1151
1152 /*
1153 * Page allocation failed, free intermediate pages and
1154 * exit.
1155 */
1156 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1157 vm_page_unwire(p, PQ_NONE);
1158 vm_page_free(p);
1159 }
1160 return (NULL);
1161 }
1162 *flags = UMA_SLAB_PRIV;
1163 zkva = keg->uk_kva +
1164 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1165 retkva = zkva;
1166 TAILQ_FOREACH(p, &alloctail, listq) {
1167 pmap_qenter(zkva, &p, 1);
1168 zkva += PAGE_SIZE;
1169 }
1170
1171 return ((void *)retkva);
1172}
1173
1174/*
1175 * Frees a number of pages to the system
1176 *
1177 * Arguments:
1178 * mem A pointer to the memory to be freed
1179 * size The size of the memory being freed
1180 * flags The original p->us_flags field
1181 *
1182 * Returns:
1183 * Nothing
1184 */
1185static void
1186page_free(void *mem, vm_size_t size, uint8_t flags)
1187{
1188 struct vmem *vmem;
1189
1190 if (flags & UMA_SLAB_KMEM)
1191 vmem = kmem_arena;
1192 else if (flags & UMA_SLAB_KERNEL)
1193 vmem = kernel_arena;
1194 else
1195 panic("UMA: page_free used with invalid flags %d", flags);
1196
1197 kmem_free(vmem, (vm_offset_t)mem, size);
1198}
1199
1200/*
1201 * Zero fill initializer
1202 *
1203 * Arguments/Returns follow uma_init specifications
1204 */
1205static int
1206zero_init(void *mem, int size, int flags)
1207{
1208 bzero(mem, size);
1209 return (0);
1210}
1211
1212/*
1213 * Finish creating a small uma keg. This calculates ipers, and the keg size.
1214 *
1215 * Arguments
1216 * keg The keg we should initialize
1217 *
1218 * Returns
1219 * Nothing
1220 */
1221static void
1222keg_small_init(uma_keg_t keg)
1223{
1224 u_int rsize;
1225 u_int memused;
1226 u_int wastedspace;
1227 u_int shsize;
1228
1229 if (keg->uk_flags & UMA_ZONE_PCPU) {
1230 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1231
1232 keg->uk_slabsize = sizeof(struct pcpu);
1233 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1234 PAGE_SIZE);
1235 } else {
1236 keg->uk_slabsize = UMA_SLAB_SIZE;
1237 keg->uk_ppera = 1;
1238 }
1239
1240 /*
1241 * Calculate the size of each allocation (rsize) according to
1242 * alignment. If the requested size is smaller than we have
1243 * allocation bits for we round it up.
1244 */
1245 rsize = keg->uk_size;
1246 if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1247 rsize = keg->uk_slabsize / SLAB_SETSIZE;
1248 if (rsize & keg->uk_align)
1249 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1250 keg->uk_rsize = rsize;
1251
1252 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1253 keg->uk_rsize < sizeof(struct pcpu),
1254 ("%s: size %u too large", __func__, keg->uk_rsize));
1255
1269 if (keg->uk_flags & UMA_ZONE_REFCNT)
1270 rsize += sizeof(uint32_t);
1271
1272 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1273 shsize = 0;
1274 else
1275 shsize = sizeof(struct uma_slab);
1276
1277 keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1278 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1279 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1280
1281 memused = keg->uk_ipers * rsize + shsize;
1282 wastedspace = keg->uk_slabsize - memused;
1283
1284 /*
1285 * We can't do OFFPAGE if we're internal or if we've been
1286 * asked to not go to the VM for buckets. If we do this we
1287 * may end up going to the VM for slabs which we do not
1288 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1289 * of UMA_ZONE_VM, which clearly forbids it.
1290 */
1291 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1292 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1293 return;
1294
1295 /*
1296 * See if using an OFFPAGE slab will limit our waste. Only do
1297 * this if it permits more items per-slab.
1298 *
1299 * XXX We could try growing slabsize to limit max waste as well.
1300 * Historically this was not done because the VM could not
1301 * efficiently handle contiguous allocations.
1302 */
1303 if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1304 (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1305 keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1306 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1307 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1308#ifdef UMA_DEBUG
1309 printf("UMA decided we need offpage slab headers for "
1310 "keg: %s, calculated wastedspace = %d, "
1311 "maximum wasted space allowed = %d, "
1312 "calculated ipers = %d, "
1313 "new wasted space = %d\n", keg->uk_name, wastedspace,
1314 keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1315 keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1316#endif
1317 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1318 }
1319
1320 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1321 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1322 keg->uk_flags |= UMA_ZONE_HASH;
1323}
1324
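A worked example may make the sizing arithmetic in keg_small_init() above easier to follow. The stand-alone program below reproduces the alignment rounding and the items-per-slab division for one illustrative case; the page size, the 96-byte on-page header and the waste limit of 10 are assumed values chosen for the example, not the kernel's real sizeof(struct uma_slab) or UMA_MAX_WASTE.

#include <stdio.h>

int
main(void)
{
        unsigned page = 4096;           /* assumed PAGE_SIZE / slab size */
        unsigned align = 8 - 1;         /* 8-byte alignment mask */
        unsigned size = 100;            /* requested item size */
        unsigned shsize = 96;           /* assumed on-page slab header size */
        unsigned max_waste = 10;        /* assumed UMA_MAX_WASTE */
        unsigned rsize, ipers, wasted;

        rsize = size;
        if (rsize & align)              /* round up to the alignment: 100 -> 104 */
                rsize = (rsize & ~align) + (align + 1);
        ipers = (page - shsize) / rsize;                /* 4000 / 104 = 38 items */
        wasted = page - (ipers * rsize + shsize);       /* 4096 - 4048 = 48 bytes */
        printf("rsize %u ipers %u wasted %u (offpage considered at >= %u)\n",
            rsize, ipers, wasted, page / max_waste);
        return (0);
}

With these assumed numbers the waste (48 bytes) stays well under the 4096 / 10 = 409-byte threshold, so the keg keeps its header on-page and the OFFPAGE path is never taken.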
1325/*
1326 * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1327 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1328 * more complicated.
1329 *
1330 * Arguments
1331 * keg The keg we should initialize
1332 *
1333 * Returns
1334 * Nothing
1335 */
1336static void
1337keg_large_init(uma_keg_t keg)
1338{
1339 u_int shsize;
1340
1341 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1342 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1343 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1344 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1345 ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1346
1347 keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1348 keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1349 keg->uk_ipers = 1;
1350 keg->uk_rsize = keg->uk_size;
1351
1352 /* We can't do OFFPAGE if we're internal, bail out here. */
1353 if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1354 return;
1355
1356 /* Check whether we have enough space to not do OFFPAGE. */
1357 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1358 shsize = sizeof(struct uma_slab);
1256 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1257 shsize = 0;
1258 else
1259 shsize = sizeof(struct uma_slab);
1260
1261 keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1262 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1263 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1264
1265 memused = keg->uk_ipers * rsize + shsize;
1266 wastedspace = keg->uk_slabsize - memused;
1267
1268 /*
1269 * We can't do OFFPAGE if we're internal or if we've been
1270 * asked to not go to the VM for buckets. If we do this we
1271 * may end up going to the VM for slabs which we do not
1272 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1273 * of UMA_ZONE_VM, which clearly forbids it.
1274 */
1275 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1276 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1277 return;
1278
1279 /*
1280 * See if using an OFFPAGE slab will limit our waste. Only do
1281 * this if it permits more items per-slab.
1282 *
1283 * XXX We could try growing slabsize to limit max waste as well.
1284 * Historically this was not done because the VM could not
1285 * efficiently handle contiguous allocations.
1286 */
1287 if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1288 (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1289 keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1290 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1291 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1292#ifdef UMA_DEBUG
1293 printf("UMA decided we need offpage slab headers for "
1294 "keg: %s, calculated wastedspace = %d, "
1295 "maximum wasted space allowed = %d, "
1296 "calculated ipers = %d, "
1297 "new wasted space = %d\n", keg->uk_name, wastedspace,
1298 keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1299 keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1300#endif
1301 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1302 }
1303
1304 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1305 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1306 keg->uk_flags |= UMA_ZONE_HASH;
1307}
1308
1309/*
1310 * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1311 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1312 * more complicated.
1313 *
1314 * Arguments
1315 * keg The keg we should initialize
1316 *
1317 * Returns
1318 * Nothing
1319 */
1320static void
1321keg_large_init(uma_keg_t keg)
1322{
1323 u_int shsize;
1324
1325 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1326 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1327 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1328 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1329 ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1330
1331 keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1332 keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1333 keg->uk_ipers = 1;
1334 keg->uk_rsize = keg->uk_size;
1335
1336 /* We can't do OFFPAGE if we're internal, bail out here. */
1337 if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1338 return;
1339
1340 /* Check whether we have enough space to not do OFFPAGE. */
1341 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1342 shsize = sizeof(struct uma_slab);
1359 if (keg->uk_flags & UMA_ZONE_REFCNT)
1360 shsize += keg->uk_ipers * sizeof(uint32_t);
1361 if (shsize & UMA_ALIGN_PTR)
1362 shsize = (shsize & ~UMA_ALIGN_PTR) +
1363 (UMA_ALIGN_PTR + 1);
1364
1365 if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1366 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1367 }
1368
1369 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1370 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1371 keg->uk_flags |= UMA_ZONE_HASH;
1372}
1373
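As a quick illustrative check of keg_large_init() above (numbers assumed, 4 KB pages): a 5000-byte item gives uk_ppera = howmany(5000, 4096) = 2, an 8192-byte slab and uk_ipers = 1. The 8192 - 5000 = 3192 bytes left after the single item comfortably hold a pointer-aligned struct uma_slab, so the header stays on-page; only when that leftover is smaller than the aligned header does the keg switch to OFFPAGE and, lacking VTOSLAB, to a hash table for slab lookup.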
1374static void
1375keg_cachespread_init(uma_keg_t keg)
1376{
1377 int alignsize;
1378 int trailer;
1379 int pages;
1380 int rsize;
1381
1382 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1383 ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1384
1385 alignsize = keg->uk_align + 1;
1386 rsize = keg->uk_size;
1387 /*
1388 * We want one item to start on every align boundary in a page. To
1389 * do this we will span pages. We will also extend the item by the
1390 * size of align if it is an even multiple of align. Otherwise, it
1391 * would fall on the same boundary every time.
1392 */
1393 if (rsize & keg->uk_align)
1394 rsize = (rsize & ~keg->uk_align) + alignsize;
1395 if ((rsize & alignsize) == 0)
1396 rsize += alignsize;
1397 trailer = rsize - keg->uk_size;
1398 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1399 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1400 keg->uk_rsize = rsize;
1401 keg->uk_ppera = pages;
1402 keg->uk_slabsize = UMA_SLAB_SIZE;
1403 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1404 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1405 KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1406 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1407 keg->uk_ipers));
1408}
1409
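To see what keg_cachespread_init() above computes, take an assumed example of 64-byte cache lines (uk_align = 63) and a 128-byte item on 4 KB pages: 128 is already aligned, but it is an even multiple of 64, so rsize becomes 128 + 64 = 192 with a 64-byte trailer. Then pages = (192 * (4096 / 64)) / 4096 = 3 and ipers = (3 * 4096 + 64) / 192 = 64, i.e. a 3-page slab holding 64 items whose start addresses, stepping by three cache lines each time, cycle through every one of the 64 cache-line boundaries of a page exactly once, which is the point of the cachespread layout.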
1410/*
1411 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1412 * the keg onto the global keg list.
1413 *
1414 * Arguments/Returns follow uma_ctor specifications
1415 * udata Actually uma_kctor_args
1416 */
1417static int
1418keg_ctor(void *mem, int size, void *udata, int flags)
1419{
1420 struct uma_kctor_args *arg = udata;
1421 uma_keg_t keg = mem;
1422 uma_zone_t zone;
1423
1424 bzero(keg, size);
1425 keg->uk_size = arg->size;
1426 keg->uk_init = arg->uminit;
1427 keg->uk_fini = arg->fini;
1428 keg->uk_align = arg->align;
1429 keg->uk_free = 0;
1430 keg->uk_reserve = 0;
1431 keg->uk_pages = 0;
1432 keg->uk_flags = arg->flags;
1433 keg->uk_allocf = page_alloc;
1434 keg->uk_freef = page_free;
1435 keg->uk_slabzone = NULL;
1436
1437 /*
1438 * The master zone is passed to us at keg-creation time.
1439 */
1440 zone = arg->zone;
1441 keg->uk_name = zone->uz_name;
1442
1443 if (arg->flags & UMA_ZONE_VM)
1444 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1445
1446 if (arg->flags & UMA_ZONE_ZINIT)
1447 keg->uk_init = zero_init;
1448
1343 if (shsize & UMA_ALIGN_PTR)
1344 shsize = (shsize & ~UMA_ALIGN_PTR) +
1345 (UMA_ALIGN_PTR + 1);
1346
1347 if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1348 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1349 }
1350
1351 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1352 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1353 keg->uk_flags |= UMA_ZONE_HASH;
1354}
1355
1356static void
1357keg_cachespread_init(uma_keg_t keg)
1358{
1359 int alignsize;
1360 int trailer;
1361 int pages;
1362 int rsize;
1363
1364 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1365 ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1366
1367 alignsize = keg->uk_align + 1;
1368 rsize = keg->uk_size;
1369 /*
1370 * We want one item to start on every align boundary in a page. To
1371 * do this we will span pages. We will also extend the item by the
1372 * size of align if it is an even multiple of align. Otherwise, it
1373 * would fall on the same boundary every time.
1374 */
1375 if (rsize & keg->uk_align)
1376 rsize = (rsize & ~keg->uk_align) + alignsize;
1377 if ((rsize & alignsize) == 0)
1378 rsize += alignsize;
1379 trailer = rsize - keg->uk_size;
1380 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1381 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1382 keg->uk_rsize = rsize;
1383 keg->uk_ppera = pages;
1384 keg->uk_slabsize = UMA_SLAB_SIZE;
1385 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1386 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1387 KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1388 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1389 keg->uk_ipers));
1390}
1391
1392/*
1393 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1394 * the keg onto the global keg list.
1395 *
1396 * Arguments/Returns follow uma_ctor specifications
1397 * udata Actually uma_kctor_args
1398 */
1399static int
1400keg_ctor(void *mem, int size, void *udata, int flags)
1401{
1402 struct uma_kctor_args *arg = udata;
1403 uma_keg_t keg = mem;
1404 uma_zone_t zone;
1405
1406 bzero(keg, size);
1407 keg->uk_size = arg->size;
1408 keg->uk_init = arg->uminit;
1409 keg->uk_fini = arg->fini;
1410 keg->uk_align = arg->align;
1411 keg->uk_free = 0;
1412 keg->uk_reserve = 0;
1413 keg->uk_pages = 0;
1414 keg->uk_flags = arg->flags;
1415 keg->uk_allocf = page_alloc;
1416 keg->uk_freef = page_free;
1417 keg->uk_slabzone = NULL;
1418
1419 /*
1420 * The master zone is passed to us at keg-creation time.
1421 */
1422 zone = arg->zone;
1423 keg->uk_name = zone->uz_name;
1424
1425 if (arg->flags & UMA_ZONE_VM)
1426 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1427
1428 if (arg->flags & UMA_ZONE_ZINIT)
1429 keg->uk_init = zero_init;
1430
1449 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1431 if (arg->flags & UMA_ZONE_MALLOC)
1450 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1451
1452 if (arg->flags & UMA_ZONE_PCPU)
1453#ifdef SMP
1454 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1455#else
1456 keg->uk_flags &= ~UMA_ZONE_PCPU;
1457#endif
1458
1459 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1460 keg_cachespread_init(keg);
1432 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1433
1434 if (arg->flags & UMA_ZONE_PCPU)
1435#ifdef SMP
1436 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1437#else
1438 keg->uk_flags &= ~UMA_ZONE_PCPU;
1439#endif
1440
1441 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1442 keg_cachespread_init(keg);
1461 } else if (keg->uk_flags & UMA_ZONE_REFCNT) {
1462 if (keg->uk_size >
1463 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
1464 sizeof(uint32_t)))
1465 keg_large_init(keg);
1466 else
1467 keg_small_init(keg);
1468 } else {
1469 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1470 keg_large_init(keg);
1471 else
1472 keg_small_init(keg);
1473 }
1474
1443 } else {
1444 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1445 keg_large_init(keg);
1446 else
1447 keg_small_init(keg);
1448 }
1449
1475 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1476 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1477 if (keg->uk_ipers > uma_max_ipers_ref)
1478 panic("Too many ref items per zone: %d > %d\n",
1479 keg->uk_ipers, uma_max_ipers_ref);
1480 keg->uk_slabzone = slabrefzone;
1481 } else
1482 keg->uk_slabzone = slabzone;
1483 }
1450 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1451 keg->uk_slabzone = slabzone;
1484
1485 /*
1486 * If we haven't booted yet we need allocations to go through the
1487 * startup cache until the vm is ready.
1488 */
1489 if (keg->uk_ppera == 1) {
1490#ifdef UMA_MD_SMALL_ALLOC
1491 keg->uk_allocf = uma_small_alloc;
1492 keg->uk_freef = uma_small_free;
1493
1494 if (booted < UMA_STARTUP)
1495 keg->uk_allocf = startup_alloc;
1496#else
1497 if (booted < UMA_STARTUP2)
1498 keg->uk_allocf = startup_alloc;
1499#endif
1500 } else if (booted < UMA_STARTUP2 &&
1501 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1502 keg->uk_allocf = startup_alloc;
1503
1504 /*
1505 * Initialize keg's lock
1506 */
1507 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1508
1509 /*
1510 * If we're putting the slab header in the actual page we need to
1511 * figure out where in each page it goes. This calculates a right
1512 * justified offset into the memory on an ALIGN_PTR boundary.
1513 */
1514 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1515 u_int totsize;
1516
1517 /* Size of the slab struct and free list */
1518 totsize = sizeof(struct uma_slab);
1519
1452
1453 /*
1454 * If we haven't booted yet we need allocations to go through the
1455 * startup cache until the vm is ready.
1456 */
1457 if (keg->uk_ppera == 1) {
1458#ifdef UMA_MD_SMALL_ALLOC
1459 keg->uk_allocf = uma_small_alloc;
1460 keg->uk_freef = uma_small_free;
1461
1462 if (booted < UMA_STARTUP)
1463 keg->uk_allocf = startup_alloc;
1464#else
1465 if (booted < UMA_STARTUP2)
1466 keg->uk_allocf = startup_alloc;
1467#endif
1468 } else if (booted < UMA_STARTUP2 &&
1469 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1470 keg->uk_allocf = startup_alloc;
1471
1472 /*
1473 * Initialize keg's lock
1474 */
1475 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1476
1477 /*
1478 * If we're putting the slab header in the actual page we need to
1479 * figure out where in each page it goes. This calculates a right
1480 * justified offset into the memory on an ALIGN_PTR boundary.
1481 */
1482 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1483 u_int totsize;
1484
1485 /* Size of the slab struct and free list */
1486 totsize = sizeof(struct uma_slab);
1487
1520 /* Size of the reference counts. */
1521 if (keg->uk_flags & UMA_ZONE_REFCNT)
1522 totsize += keg->uk_ipers * sizeof(uint32_t);
1523
1524 if (totsize & UMA_ALIGN_PTR)
1525 totsize = (totsize & ~UMA_ALIGN_PTR) +
1526 (UMA_ALIGN_PTR + 1);
1527 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1528
1529 /*
1530 * The only way the following is possible is if with our
1531 * UMA_ALIGN_PTR adjustments we are now bigger than
1532 * UMA_SLAB_SIZE. I haven't checked whether this is
1533 * mathematically possible for all cases, so we make
1534 * sure here anyway.
1535 */
1536 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1488 if (totsize & UMA_ALIGN_PTR)
1489 totsize = (totsize & ~UMA_ALIGN_PTR) +
1490 (UMA_ALIGN_PTR + 1);
1491 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1492
1493 /*
1494 * The only way the following is possible is if with our
1495 * UMA_ALIGN_PTR adjustments we are now bigger than
1496 * UMA_SLAB_SIZE. I haven't checked whether this is
1497 * mathematically possible for all cases, so we make
1498 * sure here anyway.
1499 */
1500 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1537 if (keg->uk_flags & UMA_ZONE_REFCNT)
1538 totsize += keg->uk_ipers * sizeof(uint32_t);
1539 if (totsize > PAGE_SIZE * keg->uk_ppera) {
1540 printf("zone %s ipers %d rsize %d size %d\n",
1541 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1542 keg->uk_size);
1543 panic("UMA slab won't fit.");
1544 }
1545 }
1546
1547 if (keg->uk_flags & UMA_ZONE_HASH)
1548 hash_alloc(&keg->uk_hash);
1549
1550#ifdef UMA_DEBUG
1551 printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1552 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1553 keg->uk_ipers, keg->uk_ppera,
1554 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1555#endif
1556
1557 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1558
1559 rw_wlock(&uma_rwlock);
1560 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1561 rw_wunlock(&uma_rwlock);
1562 return (0);
1563}
1564
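To make the uk_pgoff computation in keg_ctor() above concrete with assumed numbers: for a one-page slab (uk_ppera = 1, PAGE_SIZE = 4096) whose header rounds up to, say, 96 bytes after the UMA_ALIGN_PTR adjustment, uk_pgoff = 4096 - 96 = 4000. The header then occupies the last 96 bytes of the page, right-justified, where keg_alloc_slab() finds it at mem + uk_pgoff, while the items themselves are laid out from the start of the page.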
1565/*
1566 * Zone header ctor. This initializes all fields, locks, etc.
1567 *
1568 * Arguments/Returns follow uma_ctor specifications
1569 * udata Actually uma_zctor_args
1570 */
1571static int
1572zone_ctor(void *mem, int size, void *udata, int flags)
1573{
1574 struct uma_zctor_args *arg = udata;
1575 uma_zone_t zone = mem;
1576 uma_zone_t z;
1577 uma_keg_t keg;
1578
1579 bzero(zone, size);
1580 zone->uz_name = arg->name;
1581 zone->uz_ctor = arg->ctor;
1582 zone->uz_dtor = arg->dtor;
1583 zone->uz_slab = zone_fetch_slab;
1584 zone->uz_init = NULL;
1585 zone->uz_fini = NULL;
1586 zone->uz_allocs = 0;
1587 zone->uz_frees = 0;
1588 zone->uz_fails = 0;
1589 zone->uz_sleeps = 0;
1590 zone->uz_count = 0;
1591 zone->uz_count_min = 0;
1592 zone->uz_flags = 0;
1593 zone->uz_warning = NULL;
1594 timevalclear(&zone->uz_ratecheck);
1595 keg = arg->keg;
1596
1597 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1598
1599 /*
1600 * This is a pure cache zone, no kegs.
1601 */
1602 if (arg->import) {
1603 if (arg->flags & UMA_ZONE_VM)
1604 arg->flags |= UMA_ZFLAG_CACHEONLY;
1605 zone->uz_flags = arg->flags;
1606 zone->uz_size = arg->size;
1607 zone->uz_import = arg->import;
1608 zone->uz_release = arg->release;
1609 zone->uz_arg = arg->arg;
1610 zone->uz_lockptr = &zone->uz_lock;
1611 rw_wlock(&uma_rwlock);
1612 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1613 rw_wunlock(&uma_rwlock);
1614 goto out;
1615 }
1616
1617 /*
1618 * Use the regular zone/keg/slab allocator.
1619 */
1620 zone->uz_import = (uma_import)zone_import;
1621 zone->uz_release = (uma_release)zone_release;
1622 zone->uz_arg = zone;
1623
1624 if (arg->flags & UMA_ZONE_SECONDARY) {
1625 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1626 zone->uz_init = arg->uminit;
1627 zone->uz_fini = arg->fini;
1628 zone->uz_lockptr = &keg->uk_lock;
1629 zone->uz_flags |= UMA_ZONE_SECONDARY;
1630 rw_wlock(&uma_rwlock);
1631 ZONE_LOCK(zone);
1632 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1633 if (LIST_NEXT(z, uz_link) == NULL) {
1634 LIST_INSERT_AFTER(z, zone, uz_link);
1635 break;
1636 }
1637 }
1638 ZONE_UNLOCK(zone);
1639 rw_wunlock(&uma_rwlock);
1640 } else if (keg == NULL) {
1641 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1642 arg->align, arg->flags)) == NULL)
1643 return (ENOMEM);
1644 } else {
1645 struct uma_kctor_args karg;
1646 int error;
1647
1648 /* We should only be here from uma_startup() */
1649 karg.size = arg->size;
1650 karg.uminit = arg->uminit;
1651 karg.fini = arg->fini;
1652 karg.align = arg->align;
1653 karg.flags = arg->flags;
1654 karg.zone = zone;
1655 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1656 flags);
1657 if (error)
1658 return (error);
1659 }
1660
1661 /*
1662 * Link in the first keg.
1663 */
1664 zone->uz_klink.kl_keg = keg;
1665 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1666 zone->uz_lockptr = &keg->uk_lock;
1667 zone->uz_size = keg->uk_size;
1668 zone->uz_flags |= (keg->uk_flags &
1669 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1670
1671 /*
1672 * Some internal zones don't have room allocated for the per cpu
1673 * caches. If we're internal, bail out here.
1674 */
1675 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1676 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1677 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1678 return (0);
1679 }
1680
1681out:
1682 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1683 zone->uz_count = bucket_select(zone->uz_size);
1684 else
1685 zone->uz_count = BUCKET_MAX;
1686 zone->uz_count_min = zone->uz_count;
1687
1688 return (0);
1689}
1690
1691/*
1692 * Keg header dtor. This frees all data, destroys locks, frees the hash
1693 * table and removes the keg from the global list.
1694 *
1695 * Arguments/Returns follow uma_dtor specifications
1696 * udata unused
1697 */
1698static void
1699keg_dtor(void *arg, int size, void *udata)
1700{
1701 uma_keg_t keg;
1702
1703 keg = (uma_keg_t)arg;
1704 KEG_LOCK(keg);
1705 if (keg->uk_free != 0) {
1706 printf("Freed UMA keg (%s) was not empty (%d items). "
1707 " Lost %d pages of memory.\n",
1708 keg->uk_name ? keg->uk_name : "",
1709 keg->uk_free, keg->uk_pages);
1710 }
1711 KEG_UNLOCK(keg);
1712
1713 hash_free(&keg->uk_hash);
1714
1715 KEG_LOCK_FINI(keg);
1716}
1717
1718/*
1719 * Zone header dtor.
1720 *
1721 * Arguments/Returns follow uma_dtor specifications
1722 * udata unused
1723 */
1724static void
1725zone_dtor(void *arg, int size, void *udata)
1726{
1727 uma_klink_t klink;
1728 uma_zone_t zone;
1729 uma_keg_t keg;
1730
1731 zone = (uma_zone_t)arg;
1732 keg = zone_first_keg(zone);
1733
1734 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1735 cache_drain(zone);
1736
1737 rw_wlock(&uma_rwlock);
1738 LIST_REMOVE(zone, uz_link);
1739 rw_wunlock(&uma_rwlock);
1740 /*
1741 * XXX there are some races here where
1742 * the zone can be drained but zone lock
1743 * released and then refilled before we
1744	 * remove it... we don't care for now
1745 */
1746 zone_drain_wait(zone, M_WAITOK);
1747 /*
1748 * Unlink all of our kegs.
1749 */
1750 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1751 klink->kl_keg = NULL;
1752 LIST_REMOVE(klink, kl_link);
1753 if (klink == &zone->uz_klink)
1754 continue;
1755 free(klink, M_TEMP);
1756 }
1757 /*
1758 * We only destroy kegs from non secondary zones.
1759 */
1760 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1761 rw_wlock(&uma_rwlock);
1762 LIST_REMOVE(keg, uk_link);
1763 rw_wunlock(&uma_rwlock);
1764 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1765 }
1766 ZONE_LOCK_FINI(zone);
1767}
1768
1769/*
1770 * Traverses every zone in the system and calls a callback
1771 *
1772 * Arguments:
1773 * zfunc A pointer to a function which accepts a zone
1774 * as an argument.
1775 *
1776 * Returns:
1777 * Nothing
1778 */
1779static void
1780zone_foreach(void (*zfunc)(uma_zone_t))
1781{
1782 uma_keg_t keg;
1783 uma_zone_t zone;
1784
1785 rw_rlock(&uma_rwlock);
1786 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1787 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1788 zfunc(zone);
1789 }
1790 rw_runlock(&uma_rwlock);
1791}
1792
1793/* Public functions */
1794/* See uma.h */
1795void
1796uma_startup(void *bootmem, int boot_pages)
1797{
1798 struct uma_zctor_args args;
1799 uma_slab_t slab;
1501 if (totsize > PAGE_SIZE * keg->uk_ppera) {
1502 printf("zone %s ipers %d rsize %d size %d\n",
1503 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1504 keg->uk_size);
1505 panic("UMA slab won't fit.");
1506 }
1507 }
1508
1509 if (keg->uk_flags & UMA_ZONE_HASH)
1510 hash_alloc(&keg->uk_hash);
1511
1512#ifdef UMA_DEBUG
1513 printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1514 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1515 keg->uk_ipers, keg->uk_ppera,
1516 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1517#endif
1518
1519 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1520
1521 rw_wlock(&uma_rwlock);
1522 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1523 rw_wunlock(&uma_rwlock);
1524 return (0);
1525}
1526
1527/*
1528 * Zone header ctor. This initializes all fields, locks, etc.
1529 *
1530 * Arguments/Returns follow uma_ctor specifications
1531 * udata Actually uma_zctor_args
1532 */
1533static int
1534zone_ctor(void *mem, int size, void *udata, int flags)
1535{
1536 struct uma_zctor_args *arg = udata;
1537 uma_zone_t zone = mem;
1538 uma_zone_t z;
1539 uma_keg_t keg;
1540
1541 bzero(zone, size);
1542 zone->uz_name = arg->name;
1543 zone->uz_ctor = arg->ctor;
1544 zone->uz_dtor = arg->dtor;
1545 zone->uz_slab = zone_fetch_slab;
1546 zone->uz_init = NULL;
1547 zone->uz_fini = NULL;
1548 zone->uz_allocs = 0;
1549 zone->uz_frees = 0;
1550 zone->uz_fails = 0;
1551 zone->uz_sleeps = 0;
1552 zone->uz_count = 0;
1553 zone->uz_count_min = 0;
1554 zone->uz_flags = 0;
1555 zone->uz_warning = NULL;
1556 timevalclear(&zone->uz_ratecheck);
1557 keg = arg->keg;
1558
1559 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1560
1561 /*
1562 * This is a pure cache zone, no kegs.
1563 */
1564 if (arg->import) {
1565 if (arg->flags & UMA_ZONE_VM)
1566 arg->flags |= UMA_ZFLAG_CACHEONLY;
1567 zone->uz_flags = arg->flags;
1568 zone->uz_size = arg->size;
1569 zone->uz_import = arg->import;
1570 zone->uz_release = arg->release;
1571 zone->uz_arg = arg->arg;
1572 zone->uz_lockptr = &zone->uz_lock;
1573 rw_wlock(&uma_rwlock);
1574 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1575 rw_wunlock(&uma_rwlock);
1576 goto out;
1577 }
1578
1579 /*
1580 * Use the regular zone/keg/slab allocator.
1581 */
1582 zone->uz_import = (uma_import)zone_import;
1583 zone->uz_release = (uma_release)zone_release;
1584 zone->uz_arg = zone;
1585
1586 if (arg->flags & UMA_ZONE_SECONDARY) {
1587 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1588 zone->uz_init = arg->uminit;
1589 zone->uz_fini = arg->fini;
1590 zone->uz_lockptr = &keg->uk_lock;
1591 zone->uz_flags |= UMA_ZONE_SECONDARY;
1592 rw_wlock(&uma_rwlock);
1593 ZONE_LOCK(zone);
1594 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1595 if (LIST_NEXT(z, uz_link) == NULL) {
1596 LIST_INSERT_AFTER(z, zone, uz_link);
1597 break;
1598 }
1599 }
1600 ZONE_UNLOCK(zone);
1601 rw_wunlock(&uma_rwlock);
1602 } else if (keg == NULL) {
1603 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1604 arg->align, arg->flags)) == NULL)
1605 return (ENOMEM);
1606 } else {
1607 struct uma_kctor_args karg;
1608 int error;
1609
1610 /* We should only be here from uma_startup() */
1611 karg.size = arg->size;
1612 karg.uminit = arg->uminit;
1613 karg.fini = arg->fini;
1614 karg.align = arg->align;
1615 karg.flags = arg->flags;
1616 karg.zone = zone;
1617 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1618 flags);
1619 if (error)
1620 return (error);
1621 }
1622
1623 /*
1624 * Link in the first keg.
1625 */
1626 zone->uz_klink.kl_keg = keg;
1627 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1628 zone->uz_lockptr = &keg->uk_lock;
1629 zone->uz_size = keg->uk_size;
1630 zone->uz_flags |= (keg->uk_flags &
1631 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1632
1633 /*
1634 * Some internal zones don't have room allocated for the per cpu
1635 * caches. If we're internal, bail out here.
1636 */
1637 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1638 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1639 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1640 return (0);
1641 }
1642
1643out:
1644 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1645 zone->uz_count = bucket_select(zone->uz_size);
1646 else
1647 zone->uz_count = BUCKET_MAX;
1648 zone->uz_count_min = zone->uz_count;
1649
1650 return (0);
1651}
1652
1653/*
1654 * Keg header dtor. This frees all data, destroys locks, frees the hash
1655 * table and removes the keg from the global list.
1656 *
1657 * Arguments/Returns follow uma_dtor specifications
1658 * udata unused
1659 */
1660static void
1661keg_dtor(void *arg, int size, void *udata)
1662{
1663 uma_keg_t keg;
1664
1665 keg = (uma_keg_t)arg;
1666 KEG_LOCK(keg);
1667 if (keg->uk_free != 0) {
1668 printf("Freed UMA keg (%s) was not empty (%d items). "
1669 " Lost %d pages of memory.\n",
1670 keg->uk_name ? keg->uk_name : "",
1671 keg->uk_free, keg->uk_pages);
1672 }
1673 KEG_UNLOCK(keg);
1674
1675 hash_free(&keg->uk_hash);
1676
1677 KEG_LOCK_FINI(keg);
1678}
1679
1680/*
1681 * Zone header dtor.
1682 *
1683 * Arguments/Returns follow uma_dtor specifications
1684 * udata unused
1685 */
1686static void
1687zone_dtor(void *arg, int size, void *udata)
1688{
1689 uma_klink_t klink;
1690 uma_zone_t zone;
1691 uma_keg_t keg;
1692
1693 zone = (uma_zone_t)arg;
1694 keg = zone_first_keg(zone);
1695
1696 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1697 cache_drain(zone);
1698
1699 rw_wlock(&uma_rwlock);
1700 LIST_REMOVE(zone, uz_link);
1701 rw_wunlock(&uma_rwlock);
1702 /*
1703 * XXX there are some races here where
1704 * the zone can be drained but zone lock
1705 * released and then refilled before we
1706	 * remove it... we don't care for now
1707 */
1708 zone_drain_wait(zone, M_WAITOK);
1709 /*
1710 * Unlink all of our kegs.
1711 */
1712 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1713 klink->kl_keg = NULL;
1714 LIST_REMOVE(klink, kl_link);
1715 if (klink == &zone->uz_klink)
1716 continue;
1717 free(klink, M_TEMP);
1718 }
1719 /*
1720 * We only destroy kegs from non secondary zones.
1721 */
1722 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1723 rw_wlock(&uma_rwlock);
1724 LIST_REMOVE(keg, uk_link);
1725 rw_wunlock(&uma_rwlock);
1726 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1727 }
1728 ZONE_LOCK_FINI(zone);
1729}
1730
1731/*
1732 * Traverses every zone in the system and calls a callback
1733 *
1734 * Arguments:
1735 * zfunc A pointer to a function which accepts a zone
1736 * as an argument.
1737 *
1738 * Returns:
1739 * Nothing
1740 */
1741static void
1742zone_foreach(void (*zfunc)(uma_zone_t))
1743{
1744 uma_keg_t keg;
1745 uma_zone_t zone;
1746
1747 rw_rlock(&uma_rwlock);
1748 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1749 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1750 zfunc(zone);
1751 }
1752 rw_runlock(&uma_rwlock);
1753}
1754
1755/* Public functions */
1756/* See uma.h */
1757void
1758uma_startup(void *bootmem, int boot_pages)
1759{
1760 struct uma_zctor_args args;
1761 uma_slab_t slab;
1800 u_int slabsize;
1801 int i;
1802
1803#ifdef UMA_DEBUG
1804 printf("Creating uma keg headers zone and keg.\n");
1805#endif
1806 rw_init(&uma_rwlock, "UMA lock");
1807
1808 /* "manually" create the initial zone */
1809 memset(&args, 0, sizeof(args));
1810 args.name = "UMA Kegs";
1811 args.size = sizeof(struct uma_keg);
1812 args.ctor = keg_ctor;
1813 args.dtor = keg_dtor;
1814 args.uminit = zero_init;
1815 args.fini = NULL;
1816 args.keg = &masterkeg;
1817 args.align = 32 - 1;
1818 args.flags = UMA_ZFLAG_INTERNAL;
1819 /* The initial zone has no Per cpu queues so it's smaller */
1820 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1821
1822#ifdef UMA_DEBUG
1823 printf("Filling boot free list.\n");
1824#endif
1825 for (i = 0; i < boot_pages; i++) {
1826 slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1827 slab->us_data = (uint8_t *)slab;
1828 slab->us_flags = UMA_SLAB_BOOT;
1829 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1830 }
1831 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1832
1833#ifdef UMA_DEBUG
1834 printf("Creating uma zone headers zone and keg.\n");
1835#endif
1836 args.name = "UMA Zones";
1837 args.size = sizeof(struct uma_zone) +
1838 (sizeof(struct uma_cache) * (mp_maxid + 1));
1839 args.ctor = zone_ctor;
1840 args.dtor = zone_dtor;
1841 args.uminit = zero_init;
1842 args.fini = NULL;
1843 args.keg = NULL;
1844 args.align = 32 - 1;
1845 args.flags = UMA_ZFLAG_INTERNAL;
1846 /* The initial zone has no Per cpu queues so it's smaller */
1847 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1848
1849#ifdef UMA_DEBUG
1850 printf("Creating slab and hash zones.\n");
1851#endif
1852
1853 /* Now make a zone for slab headers */
1854 slabzone = uma_zcreate("UMA Slabs",
1855 sizeof(struct uma_slab),
1856 NULL, NULL, NULL, NULL,
1857 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1858
1762 int i;
1763
1764#ifdef UMA_DEBUG
1765 printf("Creating uma keg headers zone and keg.\n");
1766#endif
1767 rw_init(&uma_rwlock, "UMA lock");
1768
1769 /* "manually" create the initial zone */
1770 memset(&args, 0, sizeof(args));
1771 args.name = "UMA Kegs";
1772 args.size = sizeof(struct uma_keg);
1773 args.ctor = keg_ctor;
1774 args.dtor = keg_dtor;
1775 args.uminit = zero_init;
1776 args.fini = NULL;
1777 args.keg = &masterkeg;
1778 args.align = 32 - 1;
1779 args.flags = UMA_ZFLAG_INTERNAL;
1780 /* The initial zone has no Per cpu queues so it's smaller */
1781 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1782
1783#ifdef UMA_DEBUG
1784 printf("Filling boot free list.\n");
1785#endif
1786 for (i = 0; i < boot_pages; i++) {
1787 slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1788 slab->us_data = (uint8_t *)slab;
1789 slab->us_flags = UMA_SLAB_BOOT;
1790 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1791 }
1792 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1793
1794#ifdef UMA_DEBUG
1795 printf("Creating uma zone headers zone and keg.\n");
1796#endif
1797 args.name = "UMA Zones";
1798 args.size = sizeof(struct uma_zone) +
1799 (sizeof(struct uma_cache) * (mp_maxid + 1));
1800 args.ctor = zone_ctor;
1801 args.dtor = zone_dtor;
1802 args.uminit = zero_init;
1803 args.fini = NULL;
1804 args.keg = NULL;
1805 args.align = 32 - 1;
1806 args.flags = UMA_ZFLAG_INTERNAL;
1807 /* The initial zone has no Per cpu queues so it's smaller */
1808 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1809
1810#ifdef UMA_DEBUG
1811 printf("Creating slab and hash zones.\n");
1812#endif
1813
1814 /* Now make a zone for slab headers */
1815 slabzone = uma_zcreate("UMA Slabs",
1816 sizeof(struct uma_slab),
1817 NULL, NULL, NULL, NULL,
1818 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1819
1859 /*
1860 * We also create a zone for the bigger slabs with reference
1861	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1862 */
1863 slabsize = sizeof(struct uma_slab_refcnt);
1864 slabsize += uma_max_ipers_ref * sizeof(uint32_t);
1865 slabrefzone = uma_zcreate("UMA RCntSlabs",
1866 slabsize,
1867 NULL, NULL, NULL, NULL,
1868 UMA_ALIGN_PTR,
1869 UMA_ZFLAG_INTERNAL);
1870
1871 hashzone = uma_zcreate("UMA Hash",
1872 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1873 NULL, NULL, NULL, NULL,
1874 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1875
1876 bucket_init();
1877
1878 booted = UMA_STARTUP;
1879
1880#ifdef UMA_DEBUG
1881 printf("UMA startup complete.\n");
1882#endif
1883}
1884
1885/* see uma.h */
1886void
1887uma_startup2(void)
1888{
1889 booted = UMA_STARTUP2;
1890 bucket_enable();
1891 sx_init(&uma_drain_lock, "umadrain");
1892#ifdef UMA_DEBUG
1893 printf("UMA startup2 complete.\n");
1894#endif
1895}
1896
1897/*
1898 * Initialize our callout handle
1899 *
1900 */
1901
1902static void
1903uma_startup3(void)
1904{
1905#ifdef UMA_DEBUG
1906 printf("Starting callout.\n");
1907#endif
1908 callout_init(&uma_callout, 1);
1909 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1910#ifdef UMA_DEBUG
1911 printf("UMA startup3 complete.\n");
1912#endif
1913}
1914
1915static uma_keg_t
1916uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1917 int align, uint32_t flags)
1918{
1919 struct uma_kctor_args args;
1920
1921 args.size = size;
1922 args.uminit = uminit;
1923 args.fini = fini;
1924 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1925 args.flags = flags;
1926 args.zone = zone;
1927 return (zone_alloc_item(kegs, &args, M_WAITOK));
1928}
1929
1930/* See uma.h */
1931void
1932uma_set_align(int align)
1933{
1934
1935 if (align != UMA_ALIGN_CACHE)
1936 uma_align_cache = align;
1937}
1938
1939/* See uma.h */
1940uma_zone_t
1941uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1942 uma_init uminit, uma_fini fini, int align, uint32_t flags)
1943
1944{
1945 struct uma_zctor_args args;
1946 uma_zone_t res;
1947 bool locked;
1948
1949 /* This stuff is essential for the zone ctor */
1950 memset(&args, 0, sizeof(args));
1951 args.name = name;
1952 args.size = size;
1953 args.ctor = ctor;
1954 args.dtor = dtor;
1955 args.uminit = uminit;
1956 args.fini = fini;
1957#ifdef INVARIANTS
1958 /*
1959 * If a zone is being created with an empty constructor and
1960 * destructor, pass UMA constructor/destructor which checks for
1961 * memory use after free.
1962 */
1963 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1964 ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1965 args.ctor = trash_ctor;
1966 args.dtor = trash_dtor;
1967 args.uminit = trash_init;
1968 args.fini = trash_fini;
1969 }
1970#endif
1971 args.align = align;
1972 args.flags = flags;
1973 args.keg = NULL;
1974
1975 if (booted < UMA_STARTUP2) {
1976 locked = false;
1977 } else {
1978 sx_slock(&uma_drain_lock);
1979 locked = true;
1980 }
1981 res = zone_alloc_item(zones, &args, M_WAITOK);
1982 if (locked)
1983 sx_sunlock(&uma_drain_lock);
1984 return (res);
1985}
1986
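The zone-creation path above is easiest to read next to a typical consumer. The sketch below is a hypothetical kernel-side user of the public API; the zone name, the struct and the allocation policy are invented for illustration, but uma_zcreate(), uma_zalloc(), uma_zfree() and uma_zdestroy() are the standard uma.h entry points being exercised.

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct foo {
        int     f_id;
        char    f_name[32];
};

static uma_zone_t foo_zone;

static void
foo_zone_setup(void)
{

        /* NULL ctor/dtor/init/fini: INVARIANTS kernels get trash checking. */
        foo_zone = uma_zcreate("foo", sizeof(struct foo),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(int id)
{
        struct foo *fp;

        fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
        fp->f_id = id;
        return (fp);
}

static void
foo_free(struct foo *fp)
{

        uma_zfree(foo_zone, fp);
}

static void
foo_zone_teardown(void)
{

        uma_zdestroy(foo_zone);
}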
1987/* See uma.h */
1988uma_zone_t
1989uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1990 uma_init zinit, uma_fini zfini, uma_zone_t master)
1991{
1992 struct uma_zctor_args args;
1993 uma_keg_t keg;
1994 uma_zone_t res;
1995 bool locked;
1996
1997 keg = zone_first_keg(master);
1998 memset(&args, 0, sizeof(args));
1999 args.name = name;
2000 args.size = keg->uk_size;
2001 args.ctor = ctor;
2002 args.dtor = dtor;
2003 args.uminit = zinit;
2004 args.fini = zfini;
2005 args.align = keg->uk_align;
2006 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2007 args.keg = keg;
2008
2009 if (booted < UMA_STARTUP2) {
2010 locked = false;
2011 } else {
2012 sx_slock(&uma_drain_lock);
2013 locked = true;
2014 }
2015 /* XXX Attaches only one keg of potentially many. */
2016 res = zone_alloc_item(zones, &args, M_WAITOK);
2017 if (locked)
2018 sx_sunlock(&uma_drain_lock);
2019 return (res);
2020}
2021
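uma_zsecond_create() above builds a new zone on top of the master zone's existing keg, so both zones share the same slabs and item size while applying different constructors, destructors and per-zone init/fini. In the stock kernel the mbuf code uses this, for example, to layer its packet zone over the plain mbuf zone's keg.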
2022/* See uma.h */
2023uma_zone_t
2024uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2025 uma_init zinit, uma_fini zfini, uma_import zimport,
2026 uma_release zrelease, void *arg, int flags)
2027{
2028 struct uma_zctor_args args;
2029
2030 memset(&args, 0, sizeof(args));
2031 args.name = name;
2032 args.size = size;
2033 args.ctor = ctor;
2034 args.dtor = dtor;
2035 args.uminit = zinit;
2036 args.fini = zfini;
2037 args.import = zimport;
2038 args.release = zrelease;
2039 args.arg = arg;
2040 args.align = 0;
2041 args.flags = flags;
2042
2043 return (zone_alloc_item(zones, &args, M_WAITOK));
2044}
2045
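uma_zcache_create() above builds a bucket-only cache zone with no keg: the caller's import and release callbacks move batches of items between the zone's buckets and some external backing store. Below is a minimal sketch, assuming the uma_import/uma_release callback shapes of this era (void *arg, void **store, int count, plus a flags argument on import) and an entirely hypothetical widget_backend allocator; it is meant to show the wiring, not a real driver.

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct widget {
        int     w_id;                   /* hypothetical item type */
};

struct widget   *widget_backend_get(void *, int);       /* hypothetical */
void             widget_backend_put(void *, struct widget *);
static void     *widget_backend_arg;   /* hypothetical backing store handle */
static uma_zone_t widget_zone;

static int
widget_import(void *arg, void **store, int cnt, int flags)
{
        int i;

        for (i = 0; i < cnt; i++) {
                store[i] = widget_backend_get(arg, flags);
                if (store[i] == NULL)
                        break;
        }
        return (i);             /* number of items actually imported */
}

static void
widget_release(void *arg, void **store, int cnt)
{
        int i;

        for (i = 0; i < cnt; i++)
                widget_backend_put(arg, store[i]);
}

static void
widget_zone_setup(void)
{

        widget_zone = uma_zcache_create("widgets", sizeof(struct widget),
            NULL, NULL, NULL, NULL, widget_import, widget_release,
            widget_backend_arg, 0);
}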
2046static void
2047zone_lock_pair(uma_zone_t a, uma_zone_t b)
2048{
2049 if (a < b) {
2050 ZONE_LOCK(a);
2051 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2052 } else {
2053 ZONE_LOCK(b);
2054 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2055 }
2056}
2057
2058static void
2059zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2060{
2061
2062 ZONE_UNLOCK(a);
2063 ZONE_UNLOCK(b);
2064}
2065
2066int
2067uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2068{
2069 uma_klink_t klink;
2070 uma_klink_t kl;
2071 int error;
2072
2073 error = 0;
2074 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2075
2076 zone_lock_pair(zone, master);
2077 /*
2078 * zone must use vtoslab() to resolve objects and must already be
2079 * a secondary.
2080 */
2081 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2082 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2083 error = EINVAL;
2084 goto out;
2085 }
2086 /*
2087 * The new master must also use vtoslab().
2088 */
2089 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2090 error = EINVAL;
2091 goto out;
2092 }
1820 hashzone = uma_zcreate("UMA Hash",
1821 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1822 NULL, NULL, NULL, NULL,
1823 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1824
1825 bucket_init();
1826
1827 booted = UMA_STARTUP;
1828
1829#ifdef UMA_DEBUG
1830 printf("UMA startup complete.\n");
1831#endif
1832}
1833
1834/* see uma.h */
1835void
1836uma_startup2(void)
1837{
1838 booted = UMA_STARTUP2;
1839 bucket_enable();
1840 sx_init(&uma_drain_lock, "umadrain");
1841#ifdef UMA_DEBUG
1842 printf("UMA startup2 complete.\n");
1843#endif
1844}
1845
1846/*
1847 * Initialize our callout handle
1848 *
1849 */
1850
1851static void
1852uma_startup3(void)
1853{
1854#ifdef UMA_DEBUG
1855 printf("Starting callout.\n");
1856#endif
1857 callout_init(&uma_callout, 1);
1858 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1859#ifdef UMA_DEBUG
1860 printf("UMA startup3 complete.\n");
1861#endif
1862}
1863
1864static uma_keg_t
1865uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1866 int align, uint32_t flags)
1867{
1868 struct uma_kctor_args args;
1869
1870 args.size = size;
1871 args.uminit = uminit;
1872 args.fini = fini;
1873 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1874 args.flags = flags;
1875 args.zone = zone;
1876 return (zone_alloc_item(kegs, &args, M_WAITOK));
1877}
1878
1879/* See uma.h */
1880void
1881uma_set_align(int align)
1882{
1883
1884 if (align != UMA_ALIGN_CACHE)
1885 uma_align_cache = align;
1886}
1887
1888/* See uma.h */
1889uma_zone_t
1890uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1891 uma_init uminit, uma_fini fini, int align, uint32_t flags)
1892
1893{
1894 struct uma_zctor_args args;
1895 uma_zone_t res;
1896 bool locked;
1897
1898 /* This stuff is essential for the zone ctor */
1899 memset(&args, 0, sizeof(args));
1900 args.name = name;
1901 args.size = size;
1902 args.ctor = ctor;
1903 args.dtor = dtor;
1904 args.uminit = uminit;
1905 args.fini = fini;
1906#ifdef INVARIANTS
1907 /*
1908 * If a zone is being created with an empty constructor and
1909 * destructor, pass UMA constructor/destructor which checks for
1910 * memory use after free.
1911 */
1912 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1913 ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1914 args.ctor = trash_ctor;
1915 args.dtor = trash_dtor;
1916 args.uminit = trash_init;
1917 args.fini = trash_fini;
1918 }
1919#endif
1920 args.align = align;
1921 args.flags = flags;
1922 args.keg = NULL;
1923
1924 if (booted < UMA_STARTUP2) {
1925 locked = false;
1926 } else {
1927 sx_slock(&uma_drain_lock);
1928 locked = true;
1929 }
1930 res = zone_alloc_item(zones, &args, M_WAITOK);
1931 if (locked)
1932 sx_sunlock(&uma_drain_lock);
1933 return (res);
1934}
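/*
 * Editorial sketch, not part of the original file: typical consumer usage of
 * uma_zcreate() with a constructor/destructor pair.  The "foo" structure,
 * zone name and callbacks are hypothetical, and the block is kept under
 * #if 0 so it is illustration only.
 */
#if 0
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/uma.h>

struct foo {
	int	f_refs;
	char	f_name[32];
};

static uma_zone_t foo_zone;

static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *fp = mem;

	fp->f_refs = 1;		/* Runs on every successful allocation. */
	return (0);
}

static void
foo_dtor(void *mem, int size, void *arg)
{
	struct foo *fp = mem;

	KASSERT(fp->f_refs == 0,
	    ("foo freed with %d references", fp->f_refs));
}

static void
foo_zone_init(void)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, foo_dtor,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
#endif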
1935
1936/* See uma.h */
1937uma_zone_t
1938uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1939 uma_init zinit, uma_fini zfini, uma_zone_t master)
1940{
1941 struct uma_zctor_args args;
1942 uma_keg_t keg;
1943 uma_zone_t res;
1944 bool locked;
1945
1946 keg = zone_first_keg(master);
1947 memset(&args, 0, sizeof(args));
1948 args.name = name;
1949 args.size = keg->uk_size;
1950 args.ctor = ctor;
1951 args.dtor = dtor;
1952 args.uminit = zinit;
1953 args.fini = zfini;
1954 args.align = keg->uk_align;
1955 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1956 args.keg = keg;
1957
1958 if (booted < UMA_STARTUP2) {
1959 locked = false;
1960 } else {
1961 sx_slock(&uma_drain_lock);
1962 locked = true;
1963 }
1964 /* XXX Attaches only one keg of potentially many. */
1965 res = zone_alloc_item(zones, &args, M_WAITOK);
1966 if (locked)
1967 sx_sunlock(&uma_drain_lock);
1968 return (res);
1969}
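/*
 * Editorial sketch, not part of the original file: layering a secondary zone
 * on top of a primary zone so both share the same keg (and therefore the same
 * slabs) while keeping separate per-zone ctors and statistics.  The "bar"
 * structure and both zone names are hypothetical.
 */
#if 0
struct bar {
	int	b_id;
};

static uma_zone_t bar_zone, bar_debug_zone;

static void
bar_zones_init(void)
{

	bar_zone = uma_zcreate("bar", sizeof(struct bar), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Items come from bar_zone's keg; only the zone layer differs. */
	bar_debug_zone = uma_zsecond_create("bar debug", NULL, NULL,
	    NULL, NULL, bar_zone);
}
#endif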
1970
1971/* See uma.h */
1972uma_zone_t
1973uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1974 uma_init zinit, uma_fini zfini, uma_import zimport,
1975 uma_release zrelease, void *arg, int flags)
1976{
1977 struct uma_zctor_args args;
1978
1979 memset(&args, 0, sizeof(args));
1980 args.name = name;
1981 args.size = size;
1982 args.ctor = ctor;
1983 args.dtor = dtor;
1984 args.uminit = zinit;
1985 args.fini = zfini;
1986 args.import = zimport;
1987 args.release = zrelease;
1988 args.arg = arg;
1989 args.align = 0;
1990 args.flags = flags;
1991
1992 return (zone_alloc_item(zones, &args, M_WAITOK));
1993}
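/*
 * Editorial sketch, not part of the original file: the import/release
 * contract a uma_zcache_create() consumer supplies.  UMA provides only the
 * bucket caching; the callbacks move items between the buckets and an
 * external backing store.  The signatures mirror how uz_import/uz_release
 * are invoked elsewhere in this file; "pktpool", pktpool_get() and
 * pktpool_put() are hypothetical.
 */
#if 0
static int
pktpool_import(void *arg, void **store, int count, int flags)
{
	struct pktpool *pp = arg;
	int i;

	/* Fill as many slots as the backing pool can provide right now. */
	for (i = 0; i < count; i++) {
		store[i] = pktpool_get(pp, flags);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

static void
pktpool_release(void *arg, void **store, int count)
{
	struct pktpool *pp = arg;
	int i;

	for (i = 0; i < count; i++)
		pktpool_put(pp, store[i]);
}

static uma_zone_t pktpool_cache;

static void
pktpool_cache_init(struct pktpool *pp, int itemsize)
{

	pktpool_cache = uma_zcache_create("pkt cache", itemsize, NULL, NULL,
	    NULL, NULL, pktpool_import, pktpool_release, pp, 0);
}
#endif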
1994
1995static void
1996zone_lock_pair(uma_zone_t a, uma_zone_t b)
1997{
1998 if (a < b) {
1999 ZONE_LOCK(a);
2000 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2001 } else {
2002 ZONE_LOCK(b);
2003 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2004 }
2005}
2006
2007static void
2008zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2009{
2010
2011 ZONE_UNLOCK(a);
2012 ZONE_UNLOCK(b);
2013}
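/*
 * Editorial note: zone_lock_pair() above avoids an AB/BA deadlock between two
 * locks of the same class by always taking them in a global order (here, by
 * zone address), with MTX_DUPOK silencing the duplicate-class warning from
 * WITNESS.  A minimal sketch of the same idiom for an arbitrary object type
 * (the "obj" structure is hypothetical):
 */
#if 0
struct obj {
	struct mtx	o_lock;
};

static void
obj_lock_pair(struct obj *a, struct obj *b)
{

	if ((uintptr_t)a < (uintptr_t)b) {
		mtx_lock(&a->o_lock);
		mtx_lock_flags(&b->o_lock, MTX_DUPOK);
	} else {
		mtx_lock(&b->o_lock);
		mtx_lock_flags(&a->o_lock, MTX_DUPOK);
	}
}
#endif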
2014
2015int
2016uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2017{
2018 uma_klink_t klink;
2019 uma_klink_t kl;
2020 int error;
2021
2022 error = 0;
2023 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2024
2025 zone_lock_pair(zone, master);
2026 /*
2027 * zone must use vtoslab() to resolve objects and must already be
2028 * a secondary.
2029 */
2030 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2031 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2032 error = EINVAL;
2033 goto out;
2034 }
2035 /*
2036 * The new master must also use vtoslab().
2037 */
2038 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2039 error = EINVAL;
2040 goto out;
2041 }
2042
2093 /*
2094 * Both must either be refcnt, or not be refcnt.
2095 */
2096 if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
2097 (master->uz_flags & UMA_ZONE_REFCNT)) {
2098 error = EINVAL;
2099 goto out;
2100 }
2101 /*
2102 * The underlying object must be the same size. rsize
2103 * may be different.
2104 */
2105 if (master->uz_size != zone->uz_size) {
2106 error = E2BIG;
2107 goto out;
2108 }
2109 /*
2110 * Put it at the end of the list.
2111 */
2112 klink->kl_keg = zone_first_keg(master);
2113 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2114 if (LIST_NEXT(kl, kl_link) == NULL) {
2115 LIST_INSERT_AFTER(kl, klink, kl_link);
2116 break;
2117 }
2118 }
2119 klink = NULL;
2120 zone->uz_flags |= UMA_ZFLAG_MULTI;
2121 zone->uz_slab = zone_fetch_slab_multi;
2122
2123out:
2124 zone_unlock_pair(zone, master);
2125 if (klink != NULL)
2126 free(klink, M_TEMP);
2127
2128 return (error);
2129}
2130
2131
2132/* See uma.h */
2133void
2134uma_zdestroy(uma_zone_t zone)
2135{
2136
2137 sx_slock(&uma_drain_lock);
2138 zone_free_item(zones, zone, NULL, SKIP_NONE);
2139 sx_sunlock(&uma_drain_lock);
2140}
2141
2142/* See uma.h */
2143void *
2144uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2145{
2146 void *item;
2147 uma_cache_t cache;
2148 uma_bucket_t bucket;
2149 int lockfail;
2150 int cpu;
2151
2152 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2153 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2154
2155 /* This is the fast path allocation */
2156#ifdef UMA_DEBUG_ALLOC_1
2157 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2158#endif
2159 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2160 zone->uz_name, flags);
2161
2162 if (flags & M_WAITOK) {
2163 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2164 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2165 }
2166 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2167 ("uma_zalloc_arg: called with spinlock or critical section held"));
2168
2169#ifdef DEBUG_MEMGUARD
2170 if (memguard_cmp_zone(zone)) {
2171 item = memguard_alloc(zone->uz_size, flags);
2172 if (item != NULL) {
2173 /*
2174 * Avoid conflict with the use-after-free
2175 * protecting infrastructure from INVARIANTS.
2176 */
2177 if (zone->uz_init != NULL &&
2178 zone->uz_init != mtrash_init &&
2179 zone->uz_init(item, zone->uz_size, flags) != 0)
2180 return (NULL);
2181 if (zone->uz_ctor != NULL &&
2182 zone->uz_ctor != mtrash_ctor &&
2183 zone->uz_ctor(item, zone->uz_size, udata,
2184 flags) != 0) {
2185 zone->uz_fini(item, zone->uz_size);
2186 return (NULL);
2187 }
2188 return (item);
2189 }
2190 /* This is unfortunate but should not be fatal. */
2191 }
2192#endif
2193 /*
2194 * If possible, allocate from the per-CPU cache. There are two
2195 * requirements for safe access to the per-CPU cache: (1) the thread
2196 * accessing the cache must not be preempted or yield during access,
2197 * and (2) the thread must not migrate CPUs without switching which
2198 * cache it accesses. We rely on a critical section to prevent
2199 * preemption and migration. We release the critical section in
2200 * order to acquire the zone mutex if we are unable to allocate from
2201 * the current cache; when we re-acquire the critical section, we
2202 * must detect and handle migration if it has occurred.
2203 */
2204 critical_enter();
2205 cpu = curcpu;
2206 cache = &zone->uz_cpu[cpu];
2207
2208zalloc_start:
2209 bucket = cache->uc_allocbucket;
2210 if (bucket != NULL && bucket->ub_cnt > 0) {
2211 bucket->ub_cnt--;
2212 item = bucket->ub_bucket[bucket->ub_cnt];
2213#ifdef INVARIANTS
2214 bucket->ub_bucket[bucket->ub_cnt] = NULL;
2215#endif
2216 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2217 cache->uc_allocs++;
2218 critical_exit();
2219 if (zone->uz_ctor != NULL &&
2220 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2221 atomic_add_long(&zone->uz_fails, 1);
2222 zone_free_item(zone, item, udata, SKIP_DTOR);
2223 return (NULL);
2224 }
2225#ifdef INVARIANTS
2226 uma_dbg_alloc(zone, NULL, item);
2227#endif
2228 if (flags & M_ZERO)
2229 uma_zero_item(item, zone);
2230 return (item);
2231 }
2232
2233 /*
2234 * We have run out of items in our alloc bucket.
2235 * See if we can switch with our free bucket.
2236 */
2237 bucket = cache->uc_freebucket;
2238 if (bucket != NULL && bucket->ub_cnt > 0) {
2239#ifdef UMA_DEBUG_ALLOC
2240 printf("uma_zalloc: Swapping empty with alloc.\n");
2241#endif
2242 cache->uc_freebucket = cache->uc_allocbucket;
2243 cache->uc_allocbucket = bucket;
2244 goto zalloc_start;
2245 }
2246
2247 /*
2248 * Discard any empty allocation bucket while we hold no locks.
2249 */
2250 bucket = cache->uc_allocbucket;
2251 cache->uc_allocbucket = NULL;
2252 critical_exit();
2253 if (bucket != NULL)
2254 bucket_free(zone, bucket, udata);
2255
2256 /* Short-circuit for zones without buckets and low memory. */
2257 if (zone->uz_count == 0 || bucketdisable)
2258 goto zalloc_item;
2259
2260 /*
2261 * Attempt to retrieve the item from the per-CPU cache has failed, so
2262 * we must go back to the zone. This requires the zone lock, so we
2263 * must drop the critical section, then re-acquire it when we go back
2264 * to the cache. Since the critical section is released, we may be
2265 * preempted or migrate. As such, make sure not to maintain any
2266 * thread-local state specific to the cache from prior to releasing
2267 * the critical section.
2268 */
2269 lockfail = 0;
2270 if (ZONE_TRYLOCK(zone) == 0) {
2271 /* Record contention to size the buckets. */
2272 ZONE_LOCK(zone);
2273 lockfail = 1;
2274 }
2275 critical_enter();
2276 cpu = curcpu;
2277 cache = &zone->uz_cpu[cpu];
2278
2279 /*
2280 * Since we have locked the zone we may as well send back our stats.
2281 */
2282 atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2283 atomic_add_long(&zone->uz_frees, cache->uc_frees);
2284 cache->uc_allocs = 0;
2285 cache->uc_frees = 0;
2286
2287 /* See if we lost the race to fill the cache. */
2288 if (cache->uc_allocbucket != NULL) {
2289 ZONE_UNLOCK(zone);
2290 goto zalloc_start;
2291 }
2292
2293 /*
2294 * Check the zone's cache of buckets.
2295 */
2296 if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2297 KASSERT(bucket->ub_cnt != 0,
2298 ("uma_zalloc_arg: Returning an empty bucket."));
2299
2300 LIST_REMOVE(bucket, ub_link);
2301 cache->uc_allocbucket = bucket;
2302 ZONE_UNLOCK(zone);
2303 goto zalloc_start;
2304 }
2305 /* We are no longer associated with this CPU. */
2306 critical_exit();
2307
2308 /*
2309 * We bump the uz count when the cache size is insufficient to
2310 * handle the working set.
2311 */
2312 if (lockfail && zone->uz_count < BUCKET_MAX)
2313 zone->uz_count++;
2314 ZONE_UNLOCK(zone);
2315
2316 /*
 2317	 * Now let's just fill a bucket and put it on the free list.  If that
 2318	 * works we'll restart the allocation from the beginning and it
 2319	 * will use the just-filled bucket.
2320 */
2321 bucket = zone_alloc_bucket(zone, udata, flags);
2322 if (bucket != NULL) {
2323 ZONE_LOCK(zone);
2324 critical_enter();
2325 cpu = curcpu;
2326 cache = &zone->uz_cpu[cpu];
2327 /*
2328 * See if we lost the race or were migrated. Cache the
2329 * initialized bucket to make this less likely or claim
2330 * the memory directly.
2331 */
2332 if (cache->uc_allocbucket == NULL)
2333 cache->uc_allocbucket = bucket;
2334 else
2335 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2336 ZONE_UNLOCK(zone);
2337 goto zalloc_start;
2338 }
2339
2340 /*
2341 * We may not be able to get a bucket so return an actual item.
2342 */
2343#ifdef UMA_DEBUG
2344 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2345#endif
2346
2347zalloc_item:
2348 item = zone_alloc_item(zone, udata, flags);
2349
2350 return (item);
2351}
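/*
 * Editorial sketch, not part of the original file: the consumer-side pattern
 * the fast path above serves.  uma_zalloc() and uma_zfree() are the uma.h
 * wrappers that pass udata == NULL; "foo_zone" is the hypothetical zone from
 * the sketch following uma_zcreate() above.
 */
#if 0
static struct foo *
foo_alloc(int how)
{

	/*
	 * how is M_WAITOK or M_NOWAIT; adding M_ZERO also clears the item.
	 * With M_NOWAIT the allocation can fail and return NULL.
	 */
	return (uma_zalloc(foo_zone, how | M_ZERO));
}

static void
foo_free(struct foo *fp)
{

	/* Freeing NULL is a no-op, matching free(9). */
	uma_zfree(foo_zone, fp);
}
#endif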
2352
2353static uma_slab_t
2354keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2355{
2356 uma_slab_t slab;
2357 int reserve;
2358
2359 mtx_assert(&keg->uk_lock, MA_OWNED);
2360 slab = NULL;
2361 reserve = 0;
2362 if ((flags & M_USE_RESERVE) == 0)
2363 reserve = keg->uk_reserve;
2364
2365 for (;;) {
2366 /*
2367 * Find a slab with some space. Prefer slabs that are partially
2368 * used over those that are totally full. This helps to reduce
2369 * fragmentation.
2370 */
2371 if (keg->uk_free > reserve) {
2372 if (!LIST_EMPTY(&keg->uk_part_slab)) {
2373 slab = LIST_FIRST(&keg->uk_part_slab);
2374 } else {
2375 slab = LIST_FIRST(&keg->uk_free_slab);
2376 LIST_REMOVE(slab, us_link);
2377 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2378 us_link);
2379 }
2380 MPASS(slab->us_keg == keg);
2381 return (slab);
2382 }
2383
2384 /*
2385 * M_NOVM means don't ask at all!
2386 */
2387 if (flags & M_NOVM)
2388 break;
2389
2390 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2391 keg->uk_flags |= UMA_ZFLAG_FULL;
2392 /*
2393 * If this is not a multi-zone, set the FULL bit.
2394 * Otherwise slab_multi() takes care of it.
2395 */
2396 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2397 zone->uz_flags |= UMA_ZFLAG_FULL;
2398 zone_log_warning(zone);
2399 zone_maxaction(zone);
2400 }
2401 if (flags & M_NOWAIT)
2402 break;
2403 zone->uz_sleeps++;
2404 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2405 continue;
2406 }
2407 slab = keg_alloc_slab(keg, zone, flags);
2408 /*
2409 * If we got a slab here it's safe to mark it partially used
2410 * and return. We assume that the caller is going to remove
2411 * at least one item.
2412 */
2413 if (slab) {
2414 MPASS(slab->us_keg == keg);
2415 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2416 return (slab);
2417 }
2418 /*
2419 * We might not have been able to get a slab but another cpu
2420 * could have while we were unlocked. Check again before we
2421 * fail.
2422 */
2423 flags |= M_NOVM;
2424 }
2425 return (slab);
2426}
2427
2428static uma_slab_t
2429zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2430{
2431 uma_slab_t slab;
2432
2433 if (keg == NULL) {
2434 keg = zone_first_keg(zone);
2435 KEG_LOCK(keg);
2436 }
2437
2438 for (;;) {
2439 slab = keg_fetch_slab(keg, zone, flags);
2440 if (slab)
2441 return (slab);
2442 if (flags & (M_NOWAIT | M_NOVM))
2443 break;
2444 }
2445 KEG_UNLOCK(keg);
2446 return (NULL);
2447}
2448
2449/*
 2450 * zone_fetch_slab_multi: Fetches a slab from one available keg.  Returns
 2451 * with the keg locked.  When NULL is returned, no lock is held.
2452 *
2453 * The last pointer is used to seed the search. It is not required.
2454 */
2455static uma_slab_t
2456zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2457{
2458 uma_klink_t klink;
2459 uma_slab_t slab;
2460 uma_keg_t keg;
2461 int flags;
2462 int empty;
2463 int full;
2464
2465 /*
2466 * Don't wait on the first pass. This will skip limit tests
2467 * as well. We don't want to block if we can find a provider
2468 * without blocking.
2469 */
2470 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2471 /*
2472 * Use the last slab allocated as a hint for where to start
2473 * the search.
2474 */
2475 if (last != NULL) {
2476 slab = keg_fetch_slab(last, zone, flags);
2477 if (slab)
2478 return (slab);
2479 KEG_UNLOCK(last);
2480 }
2481 /*
 2482	 * Loop until we have a slab in case of transient failures
2483 * while M_WAITOK is specified. I'm not sure this is 100%
2484 * required but we've done it for so long now.
2485 */
2486 for (;;) {
2487 empty = 0;
2488 full = 0;
2489 /*
2490 * Search the available kegs for slabs. Be careful to hold the
2491 * correct lock while calling into the keg layer.
2492 */
2493 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2494 keg = klink->kl_keg;
2495 KEG_LOCK(keg);
2496 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2497 slab = keg_fetch_slab(keg, zone, flags);
2498 if (slab)
2499 return (slab);
2500 }
2501 if (keg->uk_flags & UMA_ZFLAG_FULL)
2502 full++;
2503 else
2504 empty++;
2505 KEG_UNLOCK(keg);
2506 }
2507 if (rflags & (M_NOWAIT | M_NOVM))
2508 break;
2509 flags = rflags;
2510 /*
2511 * All kegs are full. XXX We can't atomically check all kegs
2512 * and sleep so just sleep for a short period and retry.
2513 */
2514 if (full && !empty) {
2515 ZONE_LOCK(zone);
2516 zone->uz_flags |= UMA_ZFLAG_FULL;
2517 zone->uz_sleeps++;
2518 zone_log_warning(zone);
2519 zone_maxaction(zone);
2520 msleep(zone, zone->uz_lockptr, PVM,
2521 "zonelimit", hz/100);
2522 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2523 ZONE_UNLOCK(zone);
2524 continue;
2525 }
2526 }
2527 return (NULL);
2528}
2529
2530static void *
2531slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2532{
2533 void *item;
2534 uint8_t freei;
2535
2536 MPASS(keg == slab->us_keg);
2537 mtx_assert(&keg->uk_lock, MA_OWNED);
2538
2539 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2540 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2541 item = slab->us_data + (keg->uk_rsize * freei);
2542 slab->us_freecount--;
2543 keg->uk_free--;
2544
2545 /* Move this slab to the full list */
2546 if (slab->us_freecount == 0) {
2547 LIST_REMOVE(slab, us_link);
2548 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2549 }
2550
2551 return (item);
2552}
2553
2554static int
2555zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2556{
2557 uma_slab_t slab;
2558 uma_keg_t keg;
2559 int i;
2560
2561 slab = NULL;
2562 keg = NULL;
2563 /* Try to keep the buckets totally full */
2564 for (i = 0; i < max; ) {
2565 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2566 break;
2567 keg = slab->us_keg;
2568 while (slab->us_freecount && i < max) {
2569 bucket[i++] = slab_alloc_item(keg, slab);
2570 if (keg->uk_free <= keg->uk_reserve)
2571 break;
2572 }
2573 /* Don't grab more than one slab at a time. */
2574 flags &= ~M_WAITOK;
2575 flags |= M_NOWAIT;
2576 }
2577 if (slab != NULL)
2578 KEG_UNLOCK(keg);
2579
2580 return i;
2581}
2582
2583static uma_bucket_t
2584zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2585{
2586 uma_bucket_t bucket;
2587 int max;
2588
2589 /* Don't wait for buckets, preserve caller's NOVM setting. */
2590 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2591 if (bucket == NULL)
2592 return (NULL);
2593
2594 max = MIN(bucket->ub_entries, zone->uz_count);
2595 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2596 max, flags);
2597
2598 /*
2599 * Initialize the memory if necessary.
2600 */
2601 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2602 int i;
2603
2604 for (i = 0; i < bucket->ub_cnt; i++)
2605 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2606 flags) != 0)
2607 break;
2608 /*
2609 * If we couldn't initialize the whole bucket, put the
2610 * rest back onto the freelist.
2611 */
2612 if (i != bucket->ub_cnt) {
2613 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2614 bucket->ub_cnt - i);
2615#ifdef INVARIANTS
2616 bzero(&bucket->ub_bucket[i],
2617 sizeof(void *) * (bucket->ub_cnt - i));
2618#endif
2619 bucket->ub_cnt = i;
2620 }
2621 }
2622
2623 if (bucket->ub_cnt == 0) {
2624 bucket_free(zone, bucket, udata);
2625 atomic_add_long(&zone->uz_fails, 1);
2626 return (NULL);
2627 }
2628
2629 return (bucket);
2630}
2631
2632/*
2633 * Allocates a single item from a zone.
2634 *
2635 * Arguments
2636 * zone The zone to alloc for.
2637 * udata The data to be passed to the constructor.
2638 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2639 *
2640 * Returns
2641 * NULL if there is no memory and M_NOWAIT is set
2642 * An item if successful
2643 */
2644
2645static void *
2646zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2647{
2648 void *item;
2649
2650 item = NULL;
2651
2652#ifdef UMA_DEBUG_ALLOC
2653 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2654#endif
2655 if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2656 goto fail;
2657 atomic_add_long(&zone->uz_allocs, 1);
2658
2659 /*
2660 * We have to call both the zone's init (not the keg's init)
2661 * and the zone's ctor. This is because the item is going from
2662 * a keg slab directly to the user, and the user is expecting it
2663 * to be both zone-init'd as well as zone-ctor'd.
2664 */
2665 if (zone->uz_init != NULL) {
2666 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2667 zone_free_item(zone, item, udata, SKIP_FINI);
2668 goto fail;
2669 }
2670 }
2671 if (zone->uz_ctor != NULL) {
2672 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2673 zone_free_item(zone, item, udata, SKIP_DTOR);
2674 goto fail;
2675 }
2676 }
2677#ifdef INVARIANTS
2678 uma_dbg_alloc(zone, NULL, item);
2679#endif
2680 if (flags & M_ZERO)
2681 uma_zero_item(item, zone);
2682
2683 return (item);
2684
2685fail:
2686 atomic_add_long(&zone->uz_fails, 1);
2687 return (NULL);
2688}
2689
2690/* See uma.h */
2691void
2692uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2693{
2694 uma_cache_t cache;
2695 uma_bucket_t bucket;
2696 int lockfail;
2697 int cpu;
2698
2699 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2700 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2701
2702#ifdef UMA_DEBUG_ALLOC_1
2703 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2704#endif
2705 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2706 zone->uz_name);
2707
2708 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2709 ("uma_zfree_arg: called with spinlock or critical section held"));
2710
2711 /* uma_zfree(..., NULL) does nothing, to match free(9). */
2712 if (item == NULL)
2713 return;
2714#ifdef DEBUG_MEMGUARD
2715 if (is_memguard_addr(item)) {
2716 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2717 zone->uz_dtor(item, zone->uz_size, udata);
2718 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2719 zone->uz_fini(item, zone->uz_size);
2720 memguard_free(item);
2721 return;
2722 }
2723#endif
2724#ifdef INVARIANTS
2725 if (zone->uz_flags & UMA_ZONE_MALLOC)
2726 uma_dbg_free(zone, udata, item);
2727 else
2728 uma_dbg_free(zone, NULL, item);
2729#endif
2730 if (zone->uz_dtor != NULL)
2731 zone->uz_dtor(item, zone->uz_size, udata);
2732
2733 /*
2734 * The race here is acceptable. If we miss it we'll just have to wait
2735 * a little longer for the limits to be reset.
2736 */
2737 if (zone->uz_flags & UMA_ZFLAG_FULL)
2738 goto zfree_item;
2739
2740 /*
2741 * If possible, free to the per-CPU cache. There are two
2742 * requirements for safe access to the per-CPU cache: (1) the thread
2743 * accessing the cache must not be preempted or yield during access,
2744 * and (2) the thread must not migrate CPUs without switching which
2745 * cache it accesses. We rely on a critical section to prevent
2746 * preemption and migration. We release the critical section in
2747 * order to acquire the zone mutex if we are unable to free to the
2748 * current cache; when we re-acquire the critical section, we must
2749 * detect and handle migration if it has occurred.
2750 */
2751zfree_restart:
2752 critical_enter();
2753 cpu = curcpu;
2754 cache = &zone->uz_cpu[cpu];
2755
2756zfree_start:
2757 /*
2758 * Try to free into the allocbucket first to give LIFO ordering
 2759	 * for cache-hot data structures.  Spill over into the freebucket
2760 * if necessary. Alloc will swap them if one runs dry.
2761 */
2762 bucket = cache->uc_allocbucket;
2763 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2764 bucket = cache->uc_freebucket;
2765 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2766 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2767 ("uma_zfree: Freeing to non free bucket index."));
2768 bucket->ub_bucket[bucket->ub_cnt] = item;
2769 bucket->ub_cnt++;
2770 cache->uc_frees++;
2771 critical_exit();
2772 return;
2773 }
2774
2775 /*
 2776	 * We must go back to the zone, which requires acquiring the zone lock,
2777 * which in turn means we must release and re-acquire the critical
2778 * section. Since the critical section is released, we may be
2779 * preempted or migrate. As such, make sure not to maintain any
2780 * thread-local state specific to the cache from prior to releasing
2781 * the critical section.
2782 */
2783 critical_exit();
2784 if (zone->uz_count == 0 || bucketdisable)
2785 goto zfree_item;
2786
2787 lockfail = 0;
2788 if (ZONE_TRYLOCK(zone) == 0) {
2789 /* Record contention to size the buckets. */
2790 ZONE_LOCK(zone);
2791 lockfail = 1;
2792 }
2793 critical_enter();
2794 cpu = curcpu;
2795 cache = &zone->uz_cpu[cpu];
2796
2797 /*
2798 * Since we have locked the zone we may as well send back our stats.
2799 */
2800 atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2801 atomic_add_long(&zone->uz_frees, cache->uc_frees);
2802 cache->uc_allocs = 0;
2803 cache->uc_frees = 0;
2804
2805 bucket = cache->uc_freebucket;
2806 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2807 ZONE_UNLOCK(zone);
2808 goto zfree_start;
2809 }
2810 cache->uc_freebucket = NULL;
2811
2812 /* Can we throw this on the zone full list? */
2813 if (bucket != NULL) {
2814#ifdef UMA_DEBUG_ALLOC
2815 printf("uma_zfree: Putting old bucket on the free list.\n");
2816#endif
2817 /* ub_cnt is pointing to the last free item */
2818 KASSERT(bucket->ub_cnt != 0,
2819 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2820 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2821 }
2822
2823 /* We are no longer associated with this CPU. */
2824 critical_exit();
2825
2826 /*
2827 * We bump the uz count when the cache size is insufficient to
2828 * handle the working set.
2829 */
2830 if (lockfail && zone->uz_count < BUCKET_MAX)
2831 zone->uz_count++;
2832 ZONE_UNLOCK(zone);
2833
2834#ifdef UMA_DEBUG_ALLOC
2835 printf("uma_zfree: Allocating new free bucket.\n");
2836#endif
2837 bucket = bucket_alloc(zone, udata, M_NOWAIT);
2838 if (bucket) {
2839 critical_enter();
2840 cpu = curcpu;
2841 cache = &zone->uz_cpu[cpu];
2842 if (cache->uc_freebucket == NULL) {
2843 cache->uc_freebucket = bucket;
2844 goto zfree_start;
2845 }
2846 /*
2847 * We lost the race, start over. We have to drop our
2848 * critical section to free the bucket.
2849 */
2850 critical_exit();
2851 bucket_free(zone, bucket, udata);
2852 goto zfree_restart;
2853 }
2854
2855 /*
2856 * If nothing else caught this, we'll just do an internal free.
2857 */
2858zfree_item:
2859 zone_free_item(zone, item, udata, SKIP_DTOR);
2860
2861 return;
2862}
2863
2864static void
2865slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2866{
2867 uint8_t freei;
2868
2869 mtx_assert(&keg->uk_lock, MA_OWNED);
2870 MPASS(keg == slab->us_keg);
2871
2872 /* Do we need to remove from any lists? */
2873 if (slab->us_freecount+1 == keg->uk_ipers) {
2874 LIST_REMOVE(slab, us_link);
2875 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2876 } else if (slab->us_freecount == 0) {
2877 LIST_REMOVE(slab, us_link);
2878 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2879 }
2880
2881 /* Slab management. */
2882 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2883 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2884 slab->us_freecount++;
2885
2886 /* Keg statistics. */
2887 keg->uk_free++;
2888}
2889
2890static void
2891zone_release(uma_zone_t zone, void **bucket, int cnt)
2892{
2893 void *item;
2894 uma_slab_t slab;
2895 uma_keg_t keg;
2896 uint8_t *mem;
2897 int clearfull;
2898 int i;
2899
2900 clearfull = 0;
2901 keg = zone_first_keg(zone);
2902 KEG_LOCK(keg);
2903 for (i = 0; i < cnt; i++) {
2904 item = bucket[i];
2905 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2906 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2907 if (zone->uz_flags & UMA_ZONE_HASH) {
2908 slab = hash_sfind(&keg->uk_hash, mem);
2909 } else {
2910 mem += keg->uk_pgoff;
2911 slab = (uma_slab_t)mem;
2912 }
2913 } else {
2914 slab = vtoslab((vm_offset_t)item);
2915 if (slab->us_keg != keg) {
2916 KEG_UNLOCK(keg);
2917 keg = slab->us_keg;
2918 KEG_LOCK(keg);
2919 }
2920 }
2921 slab_free_item(keg, slab, item);
2922 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2923 if (keg->uk_pages < keg->uk_maxpages) {
2924 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2925 clearfull = 1;
2926 }
2927
2928 /*
2929 * We can handle one more allocation. Since we're
2930 * clearing ZFLAG_FULL, wake up all procs blocked
2931 * on pages. This should be uncommon, so keeping this
2932 * simple for now (rather than adding count of blocked
2933 * threads etc).
2934 */
2935 wakeup(keg);
2936 }
2937 }
2938 KEG_UNLOCK(keg);
2939 if (clearfull) {
2940 ZONE_LOCK(zone);
2941 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2942 wakeup(zone);
2943 ZONE_UNLOCK(zone);
2944 }
2945
2946}
2947
2948/*
2949 * Frees a single item to any zone.
2950 *
2951 * Arguments:
2952 * zone The zone to free to
2953 * item The item we're freeing
2954 * udata User supplied data for the dtor
2955 * skip Skip dtors and finis
2956 */
2957static void
2958zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2959{
2960
2961#ifdef INVARIANTS
2962 if (skip == SKIP_NONE) {
2963 if (zone->uz_flags & UMA_ZONE_MALLOC)
2964 uma_dbg_free(zone, udata, item);
2965 else
2966 uma_dbg_free(zone, NULL, item);
2967 }
2968#endif
2969 if (skip < SKIP_DTOR && zone->uz_dtor)
2970 zone->uz_dtor(item, zone->uz_size, udata);
2971
2972 if (skip < SKIP_FINI && zone->uz_fini)
2973 zone->uz_fini(item, zone->uz_size);
2974
2975 atomic_add_long(&zone->uz_frees, 1);
2976 zone->uz_release(zone->uz_arg, &item, 1);
2977}
2978
2979/* See uma.h */
2980int
2981uma_zone_set_max(uma_zone_t zone, int nitems)
2982{
2983 uma_keg_t keg;
2984
2985 keg = zone_first_keg(zone);
2986 if (keg == NULL)
2987 return (0);
2988 KEG_LOCK(keg);
2989 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2990 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2991 keg->uk_maxpages += keg->uk_ppera;
2992 nitems = keg->uk_maxpages * keg->uk_ipers;
2993 KEG_UNLOCK(keg);
2994
2995 return (nitems);
2996}
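/*
 * Editorial note: the limit set above is stored in pages, so the effective
 * cap is rounded up to whole slabs and the rounded value is what is returned.
 * Worked example with a hypothetical keg geometry of uk_ipers = 42 items per
 * slab and uk_ppera = 1 page per slab: requesting 100 items gives
 * uk_maxpages = (100 / 42) * 1 = 2, then 2 * 42 = 84 < 100 bumps it to 3, so
 * the effective limit becomes 3 * 42 = 126 items.
 */
#if 0
static void
foo_zone_limit(void)
{
	int eff;

	eff = uma_zone_set_max(foo_zone, 100);		/* eff == 126 here */
	KASSERT(eff == uma_zone_get_max(foo_zone),
	    ("limit readback mismatch: %d", eff));
}
#endif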
2997
2998/* See uma.h */
2999int
3000uma_zone_get_max(uma_zone_t zone)
3001{
3002 int nitems;
3003 uma_keg_t keg;
3004
3005 keg = zone_first_keg(zone);
3006 if (keg == NULL)
3007 return (0);
3008 KEG_LOCK(keg);
3009 nitems = keg->uk_maxpages * keg->uk_ipers;
3010 KEG_UNLOCK(keg);
3011
3012 return (nitems);
3013}
3014
3015/* See uma.h */
3016void
3017uma_zone_set_warning(uma_zone_t zone, const char *warning)
3018{
3019
3020 ZONE_LOCK(zone);
3021 zone->uz_warning = warning;
3022 ZONE_UNLOCK(zone);
3023}
3024
3025/* See uma.h */
3026void
3027uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3028{
3029
3030 ZONE_LOCK(zone);
3031 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3032 ZONE_UNLOCK(zone);
3033}
3034
3035/* See uma.h */
3036int
3037uma_zone_get_cur(uma_zone_t zone)
3038{
3039 int64_t nitems;
3040 u_int i;
3041
3042 ZONE_LOCK(zone);
3043 nitems = zone->uz_allocs - zone->uz_frees;
3044 CPU_FOREACH(i) {
3045 /*
3046 * See the comment in sysctl_vm_zone_stats() regarding the
3047 * safety of accessing the per-cpu caches. With the zone lock
3048 * held, it is safe, but can potentially result in stale data.
3049 */
3050 nitems += zone->uz_cpu[i].uc_allocs -
3051 zone->uz_cpu[i].uc_frees;
3052 }
3053 ZONE_UNLOCK(zone);
3054
3055 return (nitems < 0 ? 0 : nitems);
3056}
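/*
 * Editorial sketch, not part of the original file: exporting the approximate
 * number of live items through a hypothetical sysctl handler.  As the comment
 * above notes, the per-CPU counters are read without per-CPU synchronization,
 * so the value is a snapshot that may lag slightly.
 */
#if 0
static int
sysctl_foo_inuse(SYSCTL_HANDLER_ARGS)
{
	int cur;

	cur = uma_zone_get_cur(foo_zone);
	return (sysctl_handle_int(oidp, &cur, 0, req));
}
#endif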
3057
3058/* See uma.h */
3059void
3060uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3061{
3062 uma_keg_t keg;
3063
3064 keg = zone_first_keg(zone);
3065 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3066 KEG_LOCK(keg);
3067 KASSERT(keg->uk_pages == 0,
3068 ("uma_zone_set_init on non-empty keg"));
3069 keg->uk_init = uminit;
3070 KEG_UNLOCK(keg);
3071}
3072
3073/* See uma.h */
3074void
3075uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3076{
3077 uma_keg_t keg;
3078
3079 keg = zone_first_keg(zone);
3080 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3081 KEG_LOCK(keg);
3082 KASSERT(keg->uk_pages == 0,
3083 ("uma_zone_set_fini on non-empty keg"));
3084 keg->uk_fini = fini;
3085 KEG_UNLOCK(keg);
3086}
3087
3088/* See uma.h */
3089void
3090uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3091{
3092
3093 ZONE_LOCK(zone);
3094 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3095 ("uma_zone_set_zinit on non-empty keg"));
3096 zone->uz_init = zinit;
3097 ZONE_UNLOCK(zone);
3098}
3099
3100/* See uma.h */
3101void
3102uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3103{
3104
3105 ZONE_LOCK(zone);
3106 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3107 ("uma_zone_set_zfini on non-empty keg"));
3108 zone->uz_fini = zfini;
3109 ZONE_UNLOCK(zone);
3110}
3111
3112/* See uma.h */
3113/* XXX uk_freef is not actually used with the zone locked */
3114void
3115uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3116{
3117 uma_keg_t keg;
3118
3119 keg = zone_first_keg(zone);
3120 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3121 KEG_LOCK(keg);
3122 keg->uk_freef = freef;
3123 KEG_UNLOCK(keg);
3124}
3125
3126/* See uma.h */
3127/* XXX uk_allocf is not actually used with the zone locked */
3128void
3129uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3130{
3131 uma_keg_t keg;
3132
3133 keg = zone_first_keg(zone);
3134 KEG_LOCK(keg);
3135 keg->uk_allocf = allocf;
3136 KEG_UNLOCK(keg);
3137}
3138
3139/* See uma.h */
3140void
3141uma_zone_reserve(uma_zone_t zone, int items)
3142{
3143 uma_keg_t keg;
3144
3145 keg = zone_first_keg(zone);
3146 if (keg == NULL)
3147 return;
3148 KEG_LOCK(keg);
3149 keg->uk_reserve = items;
3150 KEG_UNLOCK(keg);
3151
3152 return;
3153}
3154
3155/* See uma.h */
3156int
3157uma_zone_reserve_kva(uma_zone_t zone, int count)
3158{
3159 uma_keg_t keg;
3160 vm_offset_t kva;
3161 u_int pages;
3162
3163 keg = zone_first_keg(zone);
3164 if (keg == NULL)
3165 return (0);
3166 pages = count / keg->uk_ipers;
3167
3168 if (pages * keg->uk_ipers < count)
3169 pages++;
3170
3171#ifdef UMA_MD_SMALL_ALLOC
3172 if (keg->uk_ppera > 1) {
3173#else
3174 if (1) {
3175#endif
3176 kva = kva_alloc((vm_size_t)pages * UMA_SLAB_SIZE);
3177 if (kva == 0)
3178 return (0);
3179 } else
3180 kva = 0;
3181 KEG_LOCK(keg);
3182 keg->uk_kva = kva;
3183 keg->uk_offset = 0;
3184 keg->uk_maxpages = pages;
3185#ifdef UMA_MD_SMALL_ALLOC
3186 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3187#else
3188 keg->uk_allocf = noobj_alloc;
3189#endif
3190 keg->uk_flags |= UMA_ZONE_NOFREE;
3191 KEG_UNLOCK(keg);
3192
3193 return (1);
3194}
3195
3196/* See uma.h */
3197void
3198uma_prealloc(uma_zone_t zone, int items)
3199{
3200 int slabs;
3201 uma_slab_t slab;
3202 uma_keg_t keg;
3203
3204 keg = zone_first_keg(zone);
3205 if (keg == NULL)
3206 return;
3207 KEG_LOCK(keg);
3208 slabs = items / keg->uk_ipers;
3209 if (slabs * keg->uk_ipers < items)
3210 slabs++;
3211 while (slabs > 0) {
3212 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3213 if (slab == NULL)
3214 break;
3215 MPASS(slab->us_keg == keg);
3216 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3217 slabs--;
3218 }
3219 KEG_UNLOCK(keg);
3220}
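/*
 * Editorial sketch, not part of the original file: combining
 * uma_zone_reserve() and uma_prealloc() so a low-memory path can make
 * progress.  Items are set aside for callers that pass M_USE_RESERVE (see
 * keg_fetch_slab() above), and the zone is warmed up front so early
 * allocations need not grow it.  The counts and "foo_zone" are hypothetical.
 */
#if 0
static void
foo_zone_tune(void)
{

	uma_zone_reserve(foo_zone, 32);		/* for M_USE_RESERVE callers */
	uma_prealloc(foo_zone, 256);		/* warm the keg with free slabs */
}

static struct foo *
foo_alloc_emergency(void)
{

	/* May dip into the reserved items; must not sleep. */
	return (uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE));
}
#endif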
3221
3222/* See uma.h */
2044 * The underlying object must be the same size. rsize
2045 * may be different.
2046 */
2047 if (master->uz_size != zone->uz_size) {
2048 error = E2BIG;
2049 goto out;
2050 }
2051 /*
2052 * Put it at the end of the list.
2053 */
2054 klink->kl_keg = zone_first_keg(master);
2055 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2056 if (LIST_NEXT(kl, kl_link) == NULL) {
2057 LIST_INSERT_AFTER(kl, klink, kl_link);
2058 break;
2059 }
2060 }
2061 klink = NULL;
2062 zone->uz_flags |= UMA_ZFLAG_MULTI;
2063 zone->uz_slab = zone_fetch_slab_multi;
2064
2065out:
2066 zone_unlock_pair(zone, master);
2067 if (klink != NULL)
2068 free(klink, M_TEMP);
2069
2070 return (error);
2071}
2072
2073
2074/* See uma.h */
2075void
2076uma_zdestroy(uma_zone_t zone)
2077{
2078
2079 sx_slock(&uma_drain_lock);
2080 zone_free_item(zones, zone, NULL, SKIP_NONE);
2081 sx_sunlock(&uma_drain_lock);
2082}
2083
2084/* See uma.h */
2085void *
2086uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2087{
2088 void *item;
2089 uma_cache_t cache;
2090 uma_bucket_t bucket;
2091 int lockfail;
2092 int cpu;
2093
2094 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2095 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2096
2097 /* This is the fast path allocation */
2098#ifdef UMA_DEBUG_ALLOC_1
2099 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2100#endif
2101 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2102 zone->uz_name, flags);
2103
2104 if (flags & M_WAITOK) {
2105 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2106 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2107 }
2108 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2109 ("uma_zalloc_arg: called with spinlock or critical section held"));
2110
2111#ifdef DEBUG_MEMGUARD
2112 if (memguard_cmp_zone(zone)) {
2113 item = memguard_alloc(zone->uz_size, flags);
2114 if (item != NULL) {
2115 /*
2116 * Avoid conflict with the use-after-free
2117 * protecting infrastructure from INVARIANTS.
2118 */
2119 if (zone->uz_init != NULL &&
2120 zone->uz_init != mtrash_init &&
2121 zone->uz_init(item, zone->uz_size, flags) != 0)
2122 return (NULL);
2123 if (zone->uz_ctor != NULL &&
2124 zone->uz_ctor != mtrash_ctor &&
2125 zone->uz_ctor(item, zone->uz_size, udata,
2126 flags) != 0) {
2127 zone->uz_fini(item, zone->uz_size);
2128 return (NULL);
2129 }
2130 return (item);
2131 }
2132 /* This is unfortunate but should not be fatal. */
2133 }
2134#endif
2135 /*
2136 * If possible, allocate from the per-CPU cache. There are two
2137 * requirements for safe access to the per-CPU cache: (1) the thread
2138 * accessing the cache must not be preempted or yield during access,
2139 * and (2) the thread must not migrate CPUs without switching which
2140 * cache it accesses. We rely on a critical section to prevent
2141 * preemption and migration. We release the critical section in
2142 * order to acquire the zone mutex if we are unable to allocate from
2143 * the current cache; when we re-acquire the critical section, we
2144 * must detect and handle migration if it has occurred.
2145 */
2146 critical_enter();
2147 cpu = curcpu;
2148 cache = &zone->uz_cpu[cpu];
2149
2150zalloc_start:
2151 bucket = cache->uc_allocbucket;
2152 if (bucket != NULL && bucket->ub_cnt > 0) {
2153 bucket->ub_cnt--;
2154 item = bucket->ub_bucket[bucket->ub_cnt];
2155#ifdef INVARIANTS
2156 bucket->ub_bucket[bucket->ub_cnt] = NULL;
2157#endif
2158 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2159 cache->uc_allocs++;
2160 critical_exit();
2161 if (zone->uz_ctor != NULL &&
2162 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2163 atomic_add_long(&zone->uz_fails, 1);
2164 zone_free_item(zone, item, udata, SKIP_DTOR);
2165 return (NULL);
2166 }
2167#ifdef INVARIANTS
2168 uma_dbg_alloc(zone, NULL, item);
2169#endif
2170 if (flags & M_ZERO)
2171 uma_zero_item(item, zone);
2172 return (item);
2173 }
2174
2175 /*
2176 * We have run out of items in our alloc bucket.
2177 * See if we can switch with our free bucket.
2178 */
2179 bucket = cache->uc_freebucket;
2180 if (bucket != NULL && bucket->ub_cnt > 0) {
2181#ifdef UMA_DEBUG_ALLOC
2182 printf("uma_zalloc: Swapping empty with alloc.\n");
2183#endif
2184 cache->uc_freebucket = cache->uc_allocbucket;
2185 cache->uc_allocbucket = bucket;
2186 goto zalloc_start;
2187 }
2188
2189 /*
2190 * Discard any empty allocation bucket while we hold no locks.
2191 */
2192 bucket = cache->uc_allocbucket;
2193 cache->uc_allocbucket = NULL;
2194 critical_exit();
2195 if (bucket != NULL)
2196 bucket_free(zone, bucket, udata);
2197
2198 /* Short-circuit for zones without buckets and low memory. */
2199 if (zone->uz_count == 0 || bucketdisable)
2200 goto zalloc_item;
2201
2202 /*
2203 * Attempt to retrieve the item from the per-CPU cache has failed, so
2204 * we must go back to the zone. This requires the zone lock, so we
2205 * must drop the critical section, then re-acquire it when we go back
2206 * to the cache. Since the critical section is released, we may be
2207 * preempted or migrate. As such, make sure not to maintain any
2208 * thread-local state specific to the cache from prior to releasing
2209 * the critical section.
2210 */
2211 lockfail = 0;
2212 if (ZONE_TRYLOCK(zone) == 0) {
2213 /* Record contention to size the buckets. */
2214 ZONE_LOCK(zone);
2215 lockfail = 1;
2216 }
2217 critical_enter();
2218 cpu = curcpu;
2219 cache = &zone->uz_cpu[cpu];
2220
2221 /*
2222 * Since we have locked the zone we may as well send back our stats.
2223 */
2224 atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2225 atomic_add_long(&zone->uz_frees, cache->uc_frees);
2226 cache->uc_allocs = 0;
2227 cache->uc_frees = 0;
2228
2229 /* See if we lost the race to fill the cache. */
2230 if (cache->uc_allocbucket != NULL) {
2231 ZONE_UNLOCK(zone);
2232 goto zalloc_start;
2233 }
2234
2235 /*
2236 * Check the zone's cache of buckets.
2237 */
2238 if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2239 KASSERT(bucket->ub_cnt != 0,
2240 ("uma_zalloc_arg: Returning an empty bucket."));
2241
2242 LIST_REMOVE(bucket, ub_link);
2243 cache->uc_allocbucket = bucket;
2244 ZONE_UNLOCK(zone);
2245 goto zalloc_start;
2246 }
2247 /* We are no longer associated with this CPU. */
2248 critical_exit();
2249
2250 /*
2251 * We bump the uz count when the cache size is insufficient to
2252 * handle the working set.
2253 */
2254 if (lockfail && zone->uz_count < BUCKET_MAX)
2255 zone->uz_count++;
2256 ZONE_UNLOCK(zone);
2257
2258 /*
2259 * Now lets just fill a bucket and put it on the free list. If that
2260 * works we'll restart the allocation from the begining and it
2261 * will use the just filled bucket.
2262 */
2263 bucket = zone_alloc_bucket(zone, udata, flags);
2264 if (bucket != NULL) {
2265 ZONE_LOCK(zone);
2266 critical_enter();
2267 cpu = curcpu;
2268 cache = &zone->uz_cpu[cpu];
2269 /*
2270 * See if we lost the race or were migrated. Cache the
2271 * initialized bucket to make this less likely or claim
2272 * the memory directly.
2273 */
2274 if (cache->uc_allocbucket == NULL)
2275 cache->uc_allocbucket = bucket;
2276 else
2277 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2278 ZONE_UNLOCK(zone);
2279 goto zalloc_start;
2280 }
2281
2282 /*
2283 * We may not be able to get a bucket so return an actual item.
2284 */
2285#ifdef UMA_DEBUG
2286 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2287#endif
2288
2289zalloc_item:
2290 item = zone_alloc_item(zone, udata, flags);
2291
2292 return (item);
2293}
2294
2295static uma_slab_t
2296keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2297{
2298 uma_slab_t slab;
2299 int reserve;
2300
2301 mtx_assert(&keg->uk_lock, MA_OWNED);
2302 slab = NULL;
2303 reserve = 0;
2304 if ((flags & M_USE_RESERVE) == 0)
2305 reserve = keg->uk_reserve;
2306
2307 for (;;) {
2308 /*
2309 * Find a slab with some space. Prefer slabs that are partially
2310 * used over those that are totally full. This helps to reduce
2311 * fragmentation.
2312 */
2313 if (keg->uk_free > reserve) {
2314 if (!LIST_EMPTY(&keg->uk_part_slab)) {
2315 slab = LIST_FIRST(&keg->uk_part_slab);
2316 } else {
2317 slab = LIST_FIRST(&keg->uk_free_slab);
2318 LIST_REMOVE(slab, us_link);
2319 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2320 us_link);
2321 }
2322 MPASS(slab->us_keg == keg);
2323 return (slab);
2324 }
2325
2326 /*
2327 * M_NOVM means don't ask at all!
2328 */
2329 if (flags & M_NOVM)
2330 break;
2331
2332 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2333 keg->uk_flags |= UMA_ZFLAG_FULL;
2334 /*
2335 * If this is not a multi-zone, set the FULL bit.
2336 * Otherwise slab_multi() takes care of it.
2337 */
2338 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2339 zone->uz_flags |= UMA_ZFLAG_FULL;
2340 zone_log_warning(zone);
2341 zone_maxaction(zone);
2342 }
2343 if (flags & M_NOWAIT)
2344 break;
2345 zone->uz_sleeps++;
2346 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2347 continue;
2348 }
2349 slab = keg_alloc_slab(keg, zone, flags);
2350 /*
2351 * If we got a slab here it's safe to mark it partially used
2352 * and return. We assume that the caller is going to remove
2353 * at least one item.
2354 */
2355 if (slab) {
2356 MPASS(slab->us_keg == keg);
2357 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2358 return (slab);
2359 }
2360 /*
2361 * We might not have been able to get a slab but another cpu
2362 * could have while we were unlocked. Check again before we
2363 * fail.
2364 */
2365 flags |= M_NOVM;
2366 }
2367 return (slab);
2368}
2369
2370static uma_slab_t
2371zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2372{
2373 uma_slab_t slab;
2374
2375 if (keg == NULL) {
2376 keg = zone_first_keg(zone);
2377 KEG_LOCK(keg);
2378 }
2379
2380 for (;;) {
2381 slab = keg_fetch_slab(keg, zone, flags);
2382 if (slab)
2383 return (slab);
2384 if (flags & (M_NOWAIT | M_NOVM))
2385 break;
2386 }
2387 KEG_UNLOCK(keg);
2388 return (NULL);
2389}
2390
2391/*
2392 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2393 * with the keg locked. On NULL no lock is held.
2394 *
2395 * The last pointer is used to seed the search. It is not required.
2396 */
2397static uma_slab_t
2398zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2399{
2400 uma_klink_t klink;
2401 uma_slab_t slab;
2402 uma_keg_t keg;
2403 int flags;
2404 int empty;
2405 int full;
2406
2407 /*
2408 * Don't wait on the first pass. This will skip limit tests
2409 * as well. We don't want to block if we can find a provider
2410 * without blocking.
2411 */
2412 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2413 /*
2414 * Use the last slab allocated as a hint for where to start
2415 * the search.
2416 */
2417 if (last != NULL) {
2418 slab = keg_fetch_slab(last, zone, flags);
2419 if (slab)
2420 return (slab);
2421 KEG_UNLOCK(last);
2422 }
2423 /*
2424 * Loop until we have a slab incase of transient failures
2425 * while M_WAITOK is specified. I'm not sure this is 100%
2426 * required but we've done it for so long now.
2427 */
2428 for (;;) {
2429 empty = 0;
2430 full = 0;
2431 /*
2432 * Search the available kegs for slabs. Be careful to hold the
2433 * correct lock while calling into the keg layer.
2434 */
2435 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2436 keg = klink->kl_keg;
2437 KEG_LOCK(keg);
2438 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2439 slab = keg_fetch_slab(keg, zone, flags);
2440 if (slab)
2441 return (slab);
2442 }
2443 if (keg->uk_flags & UMA_ZFLAG_FULL)
2444 full++;
2445 else
2446 empty++;
2447 KEG_UNLOCK(keg);
2448 }
2449 if (rflags & (M_NOWAIT | M_NOVM))
2450 break;
2451 flags = rflags;
2452 /*
2453 * All kegs are full. XXX We can't atomically check all kegs
2454 * and sleep so just sleep for a short period and retry.
2455 */
2456 if (full && !empty) {
2457 ZONE_LOCK(zone);
2458 zone->uz_flags |= UMA_ZFLAG_FULL;
2459 zone->uz_sleeps++;
2460 zone_log_warning(zone);
2461 zone_maxaction(zone);
2462 msleep(zone, zone->uz_lockptr, PVM,
2463 "zonelimit", hz/100);
2464 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2465 ZONE_UNLOCK(zone);
2466 continue;
2467 }
2468 }
2469 return (NULL);
2470}
2471
2472static void *
2473slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2474{
2475 void *item;
2476 uint8_t freei;
2477
2478 MPASS(keg == slab->us_keg);
2479 mtx_assert(&keg->uk_lock, MA_OWNED);
2480
2481 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2482 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2483 item = slab->us_data + (keg->uk_rsize * freei);
2484 slab->us_freecount--;
2485 keg->uk_free--;
2486
2487 /* Move this slab to the full list */
2488 if (slab->us_freecount == 0) {
2489 LIST_REMOVE(slab, us_link);
2490 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2491 }
2492
2493 return (item);
2494}
2495
2496static int
2497zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2498{
2499 uma_slab_t slab;
2500 uma_keg_t keg;
2501 int i;
2502
2503 slab = NULL;
2504 keg = NULL;
2505 /* Try to keep the buckets totally full */
2506 for (i = 0; i < max; ) {
2507 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2508 break;
2509 keg = slab->us_keg;
2510 while (slab->us_freecount && i < max) {
2511 bucket[i++] = slab_alloc_item(keg, slab);
2512 if (keg->uk_free <= keg->uk_reserve)
2513 break;
2514 }
2515 /* Don't grab more than one slab at a time. */
2516 flags &= ~M_WAITOK;
2517 flags |= M_NOWAIT;
2518 }
2519 if (slab != NULL)
2520 KEG_UNLOCK(keg);
2521
2522 return i;
2523}
2524
2525static uma_bucket_t
2526zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2527{
2528 uma_bucket_t bucket;
2529 int max;
2530
2531 /* Don't wait for buckets, preserve caller's NOVM setting. */
2532 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2533 if (bucket == NULL)
2534 return (NULL);
2535
2536 max = MIN(bucket->ub_entries, zone->uz_count);
2537 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2538 max, flags);
2539
2540 /*
2541 * Initialize the memory if necessary.
2542 */
2543 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2544 int i;
2545
2546 for (i = 0; i < bucket->ub_cnt; i++)
2547 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2548 flags) != 0)
2549 break;
2550 /*
2551 * If we couldn't initialize the whole bucket, put the
2552 * rest back onto the freelist.
2553 */
2554 if (i != bucket->ub_cnt) {
2555 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2556 bucket->ub_cnt - i);
2557#ifdef INVARIANTS
2558 bzero(&bucket->ub_bucket[i],
2559 sizeof(void *) * (bucket->ub_cnt - i));
2560#endif
2561 bucket->ub_cnt = i;
2562 }
2563 }
2564
2565 if (bucket->ub_cnt == 0) {
2566 bucket_free(zone, bucket, udata);
2567 atomic_add_long(&zone->uz_fails, 1);
2568 return (NULL);
2569 }
2570
2571 return (bucket);
2572}
2573
2574/*
2575 * Allocates a single item from a zone.
2576 *
2577 * Arguments
2578 * zone The zone to alloc for.
2579 * udata The data to be passed to the constructor.
2580 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2581 *
2582 * Returns
2583 * NULL if there is no memory and M_NOWAIT is set
2584 * An item if successful
2585 */
2586
2587static void *
2588zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2589{
2590 void *item;
2591
2592 item = NULL;
2593
2594#ifdef UMA_DEBUG_ALLOC
2595 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2596#endif
2597 if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2598 goto fail;
2599 atomic_add_long(&zone->uz_allocs, 1);
2600
2601 /*
2602 * We have to call both the zone's init (not the keg's init)
2603 * and the zone's ctor. This is because the item is going from
2604 * a keg slab directly to the user, and the user is expecting it
2605 * to be both zone-init'd as well as zone-ctor'd.
2606 */
2607 if (zone->uz_init != NULL) {
2608 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2609 zone_free_item(zone, item, udata, SKIP_FINI);
2610 goto fail;
2611 }
2612 }
2613 if (zone->uz_ctor != NULL) {
2614 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2615 zone_free_item(zone, item, udata, SKIP_DTOR);
2616 goto fail;
2617 }
2618 }
2619#ifdef INVARIANTS
2620 uma_dbg_alloc(zone, NULL, item);
2621#endif
2622 if (flags & M_ZERO)
2623 uma_zero_item(item, zone);
2624
2625 return (item);
2626
2627fail:
2628 atomic_add_long(&zone->uz_fails, 1);
2629 return (NULL);
2630}
2631
2632/* See uma.h */
2633void
2634uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2635{
2636 uma_cache_t cache;
2637 uma_bucket_t bucket;
2638 int lockfail;
2639 int cpu;
2640
2641 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2642 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2643
2644#ifdef UMA_DEBUG_ALLOC_1
2645 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2646#endif
2647 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2648 zone->uz_name);
2649
2650 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2651 ("uma_zfree_arg: called with spinlock or critical section held"));
2652
2653 /* uma_zfree(..., NULL) does nothing, to match free(9). */
2654 if (item == NULL)
2655 return;
2656#ifdef DEBUG_MEMGUARD
2657 if (is_memguard_addr(item)) {
2658 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2659 zone->uz_dtor(item, zone->uz_size, udata);
2660 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2661 zone->uz_fini(item, zone->uz_size);
2662 memguard_free(item);
2663 return;
2664 }
2665#endif
2666#ifdef INVARIANTS
2667 if (zone->uz_flags & UMA_ZONE_MALLOC)
2668 uma_dbg_free(zone, udata, item);
2669 else
2670 uma_dbg_free(zone, NULL, item);
2671#endif
2672 if (zone->uz_dtor != NULL)
2673 zone->uz_dtor(item, zone->uz_size, udata);
2674
2675 /*
2676 * The race here is acceptable. If we miss it we'll just have to wait
2677 * a little longer for the limits to be reset.
2678 */
2679 if (zone->uz_flags & UMA_ZFLAG_FULL)
2680 goto zfree_item;
2681
2682 /*
2683 * If possible, free to the per-CPU cache. There are two
2684 * requirements for safe access to the per-CPU cache: (1) the thread
2685 * accessing the cache must not be preempted or yield during access,
2686 * and (2) the thread must not migrate CPUs without switching which
2687 * cache it accesses. We rely on a critical section to prevent
2688 * preemption and migration. We release the critical section in
2689 * order to acquire the zone mutex if we are unable to free to the
2690 * current cache; when we re-acquire the critical section, we must
2691 * detect and handle migration if it has occurred.
2692 */
2693zfree_restart:
2694 critical_enter();
2695 cpu = curcpu;
2696 cache = &zone->uz_cpu[cpu];
2697
2698zfree_start:
2699 /*
2700 * Try to free into the allocbucket first to give LIFO ordering
2701 * for cache-hot datastructures. Spill over into the freebucket
2702 * if necessary. Alloc will swap them if one runs dry.
2703 */
2704 bucket = cache->uc_allocbucket;
2705 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2706 bucket = cache->uc_freebucket;
2707 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2708 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2709 ("uma_zfree: Freeing to non free bucket index."));
2710 bucket->ub_bucket[bucket->ub_cnt] = item;
2711 bucket->ub_cnt++;
2712 cache->uc_frees++;
2713 critical_exit();
2714 return;
2715 }
2716
2717 /*
2718 * We must go back the zone, which requires acquiring the zone lock,
2719 * which in turn means we must release and re-acquire the critical
2720 * section. Since the critical section is released, we may be
2721 * preempted or migrate. As such, make sure not to maintain any
2722 * thread-local state specific to the cache from prior to releasing
2723 * the critical section.
2724 */
2725 critical_exit();
2726 if (zone->uz_count == 0 || bucketdisable)
2727 goto zfree_item;
2728
2729 lockfail = 0;
2730 if (ZONE_TRYLOCK(zone) == 0) {
2731 /* Record contention to size the buckets. */
2732 ZONE_LOCK(zone);
2733 lockfail = 1;
2734 }
2735 critical_enter();
2736 cpu = curcpu;
2737 cache = &zone->uz_cpu[cpu];
2738
2739 /*
2740 * Since we have locked the zone we may as well send back our stats.
2741 */
2742 atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2743 atomic_add_long(&zone->uz_frees, cache->uc_frees);
2744 cache->uc_allocs = 0;
2745 cache->uc_frees = 0;
2746
2747 bucket = cache->uc_freebucket;
2748 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2749 ZONE_UNLOCK(zone);
2750 goto zfree_start;
2751 }
2752 cache->uc_freebucket = NULL;
2753
2754 /* Can we throw this on the zone full list? */
2755 if (bucket != NULL) {
2756#ifdef UMA_DEBUG_ALLOC
2757 printf("uma_zfree: Putting old bucket on the free list.\n");
2758#endif
2759 /* ub_cnt is pointing to the last free item */
2760 KASSERT(bucket->ub_cnt != 0,
2761 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2762 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2763 }
2764
2765 /* We are no longer associated with this CPU. */
2766 critical_exit();
2767
2768 /*
2769 * We bump the uz count when the cache size is insufficient to
2770 * handle the working set.
2771 */
2772 if (lockfail && zone->uz_count < BUCKET_MAX)
2773 zone->uz_count++;
2774 ZONE_UNLOCK(zone);
2775
2776#ifdef UMA_DEBUG_ALLOC
2777 printf("uma_zfree: Allocating new free bucket.\n");
2778#endif
2779 bucket = bucket_alloc(zone, udata, M_NOWAIT);
2780 if (bucket) {
2781 critical_enter();
2782 cpu = curcpu;
2783 cache = &zone->uz_cpu[cpu];
2784 if (cache->uc_freebucket == NULL) {
2785 cache->uc_freebucket = bucket;
2786 goto zfree_start;
2787 }
2788 /*
2789 * We lost the race, start over. We have to drop our
2790 * critical section to free the bucket.
2791 */
2792 critical_exit();
2793 bucket_free(zone, bucket, udata);
2794 goto zfree_restart;
2795 }
2796
2797 /*
2798 * If nothing else caught this, we'll just do an internal free.
2799 */
2800zfree_item:
2801 zone_free_item(zone, item, udata, SKIP_DTOR);
2802
2803 return;
2804}
2805
2806static void
2807slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2808{
2809 uint8_t freei;
2810
2811 mtx_assert(&keg->uk_lock, MA_OWNED);
2812 MPASS(keg == slab->us_keg);
2813
2814 /* Do we need to remove from any lists? */
2815 if (slab->us_freecount+1 == keg->uk_ipers) {
2816 LIST_REMOVE(slab, us_link);
2817 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2818 } else if (slab->us_freecount == 0) {
2819 LIST_REMOVE(slab, us_link);
2820 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2821 }
2822
2823 /* Slab management. */
2824 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2825 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2826 slab->us_freecount++;
2827
2828 /* Keg statistics. */
2829 keg->uk_free++;
2830}
2831
2832static void
2833zone_release(uma_zone_t zone, void **bucket, int cnt)
2834{
2835 void *item;
2836 uma_slab_t slab;
2837 uma_keg_t keg;
2838 uint8_t *mem;
2839 int clearfull;
2840 int i;
2841
2842 clearfull = 0;
2843 keg = zone_first_keg(zone);
2844 KEG_LOCK(keg);
2845 for (i = 0; i < cnt; i++) {
2846 item = bucket[i];
2847 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2848 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2849 if (zone->uz_flags & UMA_ZONE_HASH) {
2850 slab = hash_sfind(&keg->uk_hash, mem);
2851 } else {
2852 mem += keg->uk_pgoff;
2853 slab = (uma_slab_t)mem;
2854 }
2855 } else {
2856 slab = vtoslab((vm_offset_t)item);
2857 if (slab->us_keg != keg) {
2858 KEG_UNLOCK(keg);
2859 keg = slab->us_keg;
2860 KEG_LOCK(keg);
2861 }
2862 }
2863 slab_free_item(keg, slab, item);
2864 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2865 if (keg->uk_pages < keg->uk_maxpages) {
2866 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2867 clearfull = 1;
2868 }
2869
2870 /*
2871 * We can handle one more allocation. Since we're
2872 * clearing ZFLAG_FULL, wake up all procs blocked
2873 * on pages. This should be uncommon, so keeping this
2874 	 * simple for now (rather than adding a count of blocked
2875 	 * threads, etc.).
2876 */
2877 wakeup(keg);
2878 }
2879 }
2880 KEG_UNLOCK(keg);
2881 if (clearfull) {
2882 ZONE_LOCK(zone);
2883 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2884 wakeup(zone);
2885 ZONE_UNLOCK(zone);
2886 }
2887
2888}
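
For zones that use neither VTOSLAB nor a hash, the loop above locates the slab header purely by address arithmetic: mask the item pointer down to its slab boundary and step uk_pgoff bytes in to reach the embedded header. A hypothetical helper (not part of this file, and assuming the file's usual includes) that performs only that lookup:

static uma_slab_t
slab_from_item(uma_keg_t keg, void *item)
{
	uint8_t *mem;

	/* Clear the low bits to land on the slab boundary... */
	mem = (uint8_t *)((uintptr_t)item & ~UMA_SLAB_MASK);
	/* ...then the on-slab header sits uk_pgoff bytes into the slab. */
	return ((uma_slab_t)(mem + keg->uk_pgoff));
}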
2889
2890/*
2891 * Frees a single item to any zone.
2892 *
2893 * Arguments:
2894 * zone The zone to free to
2895 * item The item we're freeing
2896 * udata User supplied data for the dtor
2897 * skip Skip dtors and finis
2898 */
2899static void
2900zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2901{
2902
2903#ifdef INVARIANTS
2904 if (skip == SKIP_NONE) {
2905 if (zone->uz_flags & UMA_ZONE_MALLOC)
2906 uma_dbg_free(zone, udata, item);
2907 else
2908 uma_dbg_free(zone, NULL, item);
2909 }
2910#endif
2911 if (skip < SKIP_DTOR && zone->uz_dtor)
2912 zone->uz_dtor(item, zone->uz_size, udata);
2913
2914 if (skip < SKIP_FINI && zone->uz_fini)
2915 zone->uz_fini(item, zone->uz_size);
2916
2917 atomic_add_long(&zone->uz_frees, 1);
2918 zone->uz_release(zone->uz_arg, &item, 1);
2919}
2920
2921/* See uma.h */
2922int
2923uma_zone_set_max(uma_zone_t zone, int nitems)
2924{
2925 uma_keg_t keg;
2926
2927 keg = zone_first_keg(zone);
2928 if (keg == NULL)
2929 return (0);
2930 KEG_LOCK(keg);
2931 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2932 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2933 keg->uk_maxpages += keg->uk_ppera;
2934 nitems = keg->uk_maxpages * keg->uk_ipers;
2935 KEG_UNLOCK(keg);
2936
2937 return (nitems);
2938}
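
A worked example of the rounding above, using invented figures: for a keg with uk_ipers = 50 items per slab and uk_ppera = 1 page per slab, uma_zone_set_max(zone, 120) computes maxpages = (120 / 50) * 1 = 2; since 2 * 50 = 100 is still below 120, another uk_ppera is added, giving maxpages = 3, and the caller gets back the effective limit 3 * 50 = 150. Callers should therefore treat the return value, not the requested nitems, as the real cap; a sketch (foo_zone is the hypothetical zone from the earlier example):

static void
foo_apply_cap(void)
{
	int eff;

	eff = uma_zone_set_max(foo_zone, 120);
	printf("foo zone capped at %d items (120 requested)\n", eff);
}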
2939
2940/* See uma.h */
2941int
2942uma_zone_get_max(uma_zone_t zone)
2943{
2944 int nitems;
2945 uma_keg_t keg;
2946
2947 keg = zone_first_keg(zone);
2948 if (keg == NULL)
2949 return (0);
2950 KEG_LOCK(keg);
2951 nitems = keg->uk_maxpages * keg->uk_ipers;
2952 KEG_UNLOCK(keg);
2953
2954 return (nitems);
2955}
2956
2957/* See uma.h */
2958void
2959uma_zone_set_warning(uma_zone_t zone, const char *warning)
2960{
2961
2962 ZONE_LOCK(zone);
2963 zone->uz_warning = warning;
2964 ZONE_UNLOCK(zone);
2965}
2966
2967/* See uma.h */
2968void
2969uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
2970{
2971
2972 ZONE_LOCK(zone);
2973 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
2974 ZONE_UNLOCK(zone);
2975}
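
These knobs are typically applied together right after zone creation: uma_zone_set_max() caps the keg, uma_zone_set_warning() supplies the message UMA logs when the zone runs into that cap, and uma_zone_set_maxaction() additionally queues a task at that point. A hedged sketch using the first two (the maxaction callback type is left out rather than guessing at its exact signature; foo_zone is the hypothetical zone from the earlier example):

static void
foo_limits(void)
{
	uma_zone_set_max(foo_zone, 1024);
	uma_zone_set_warning(foo_zone, "foo zone limit reached");
}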
2976
2977/* See uma.h */
2978int
2979uma_zone_get_cur(uma_zone_t zone)
2980{
2981 int64_t nitems;
2982 u_int i;
2983
2984 ZONE_LOCK(zone);
2985 nitems = zone->uz_allocs - zone->uz_frees;
2986 CPU_FOREACH(i) {
2987 /*
2988 * See the comment in sysctl_vm_zone_stats() regarding the
2989 * safety of accessing the per-cpu caches. With the zone lock
2990 * held, it is safe, but can potentially result in stale data.
2991 */
2992 nitems += zone->uz_cpu[i].uc_allocs -
2993 zone->uz_cpu[i].uc_frees;
2994 }
2995 ZONE_UNLOCK(zone);
2996
2997 return (nitems < 0 ? 0 : nitems);
2998}
2999
3000/* See uma.h */
3001void
3002uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3003{
3004 uma_keg_t keg;
3005
3006 keg = zone_first_keg(zone);
3007 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3008 KEG_LOCK(keg);
3009 KASSERT(keg->uk_pages == 0,
3010 ("uma_zone_set_init on non-empty keg"));
3011 keg->uk_init = uminit;
3012 KEG_UNLOCK(keg);
3013}
3014
3015/* See uma.h */
3016void
3017uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3018{
3019 uma_keg_t keg;
3020
3021 keg = zone_first_keg(zone);
3022 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3023 KEG_LOCK(keg);
3024 KASSERT(keg->uk_pages == 0,
3025 ("uma_zone_set_fini on non-empty keg"));
3026 keg->uk_fini = fini;
3027 KEG_UNLOCK(keg);
3028}
3029
3030/* See uma.h */
3031void
3032uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3033{
3034
3035 ZONE_LOCK(zone);
3036 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3037 ("uma_zone_set_zinit on non-empty keg"));
3038 zone->uz_init = zinit;
3039 ZONE_UNLOCK(zone);
3040}
3041
3042/* See uma.h */
3043void
3044uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3045{
3046
3047 ZONE_LOCK(zone);
3048 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3049 ("uma_zone_set_zfini on non-empty keg"));
3050 zone->uz_fini = zfini;
3051 ZONE_UNLOCK(zone);
3052}
3053
3054/* See uma.h */
3055/* XXX uk_freef is not actually used with the zone locked */
3056void
3057uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3058{
3059 uma_keg_t keg;
3060
3061 keg = zone_first_keg(zone);
3062 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3063 KEG_LOCK(keg);
3064 keg->uk_freef = freef;
3065 KEG_UNLOCK(keg);
3066}
3067
3068/* See uma.h */
3069/* XXX uk_allocf is not actually used with the zone locked */
3070void
3071uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3072{
3073 uma_keg_t keg;
3074
3075 keg = zone_first_keg(zone);
3076 KEG_LOCK(keg);
3077 keg->uk_allocf = allocf;
3078 KEG_UNLOCK(keg);
3079}
3080
3081/* See uma.h */
3082void
3083uma_zone_reserve(uma_zone_t zone, int items)
3084{
3085 uma_keg_t keg;
3086
3087 keg = zone_first_keg(zone);
3088 if (keg == NULL)
3089 return;
3090 KEG_LOCK(keg);
3091 keg->uk_reserve = items;
3092 KEG_UNLOCK(keg);
3093
3094 return;
3095}
3096
3097/* See uma.h */
3098int
3099uma_zone_reserve_kva(uma_zone_t zone, int count)
3100{
3101 uma_keg_t keg;
3102 vm_offset_t kva;
3103 u_int pages;
3104
3105 keg = zone_first_keg(zone);
3106 if (keg == NULL)
3107 return (0);
3108 pages = count / keg->uk_ipers;
3109
3110 if (pages * keg->uk_ipers < count)
3111 pages++;
3112
3113#ifdef UMA_MD_SMALL_ALLOC
3114 if (keg->uk_ppera > 1) {
3115#else
3116 if (1) {
3117#endif
3118 kva = kva_alloc((vm_size_t)pages * UMA_SLAB_SIZE);
3119 if (kva == 0)
3120 return (0);
3121 } else
3122 kva = 0;
3123 KEG_LOCK(keg);
3124 keg->uk_kva = kva;
3125 keg->uk_offset = 0;
3126 keg->uk_maxpages = pages;
3127#ifdef UMA_MD_SMALL_ALLOC
3128 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3129#else
3130 keg->uk_allocf = noobj_alloc;
3131#endif
3132 keg->uk_flags |= UMA_ZONE_NOFREE;
3133 KEG_UNLOCK(keg);
3134
3135 return (1);
3136}
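
A sketch of how a subsystem might call this at boot for a zone whose backing pages should come from a private KVA range; the zone, the item count and the panic policy are illustrative assumptions. As the code above shows, the return value is 1 on success and 0 if the KVA could not be allocated:

static void
foo_kva_setup(void)
{
	/* foo_zone is the hypothetical zone from the earlier example. */
	if (uma_zone_reserve_kva(foo_zone, 4096) == 0)
		panic("foo: unable to reserve KVA for 4096 items");
}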
3137
3138/* See uma.h */
3139void
3140uma_prealloc(uma_zone_t zone, int items)
3141{
3142 int slabs;
3143 uma_slab_t slab;
3144 uma_keg_t keg;
3145
3146 keg = zone_first_keg(zone);
3147 if (keg == NULL)
3148 return;
3149 KEG_LOCK(keg);
3150 slabs = items / keg->uk_ipers;
3151 if (slabs * keg->uk_ipers < items)
3152 slabs++;
3153 while (slabs > 0) {
3154 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3155 if (slab == NULL)
3156 break;
3157 MPASS(slab->us_keg == keg);
3158 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3159 slabs--;
3160 }
3161 KEG_UNLOCK(keg);
3162}
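
uma_zone_reserve() and uma_prealloc() address different needs and are often combined: the former holds uk_reserve items back for allocations that pass M_USE_RESERVE, while the latter builds free slabs up front so early allocations do not have to sleep for pages. A hypothetical boot-time combination (foo_zone as in the earlier example):

static void
foo_prime(void)
{
	uma_zone_reserve(foo_zone, 32);		/* keep 32 items for M_USE_RESERVE callers */
	uma_prealloc(foo_zone, 256);		/* populate free slabs for 256 items now */
}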
3163
3164/* See uma.h */
3223uint32_t *
3224uma_find_refcnt(uma_zone_t zone, void *item)
3225{
3226 uma_slabrefcnt_t slabref;
3227 uma_slab_t slab;
3228 uma_keg_t keg;
3229 uint32_t *refcnt;
3230 int idx;
3231
3232 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3233 slabref = (uma_slabrefcnt_t)slab;
3234 keg = slab->us_keg;
3235 KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3236 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3237 idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3238 refcnt = &slabref->us_refcnt[idx];
3239 return refcnt;
3240}
3241
3242/* See uma.h */
3243static void
3244uma_reclaim_locked(bool kmem_danger)
3245{
3246
3247#ifdef UMA_DEBUG
3248 printf("UMA: vm asked us to release pages!\n");
3249#endif
3250 sx_assert(&uma_drain_lock, SA_XLOCKED);
3251 bucket_enable();
3252 zone_foreach(zone_drain);
3253 if (vm_page_count_min() || kmem_danger) {
3254 cache_drain_safe(NULL);
3255 zone_foreach(zone_drain);
3256 }
3257 /*
3258 	 * This zone is visited early in the pass, but draining the other
3259 	 * zones frees slabs back into it, so visit it again here to free
3260 	 * pages that have since become empty.  We have to do the same for buckets.
3261 */
3262 zone_drain(slabzone);
3165static void
3166uma_reclaim_locked(bool kmem_danger)
3167{
3168
3169#ifdef UMA_DEBUG
3170 printf("UMA: vm asked us to release pages!\n");
3171#endif
3172 sx_assert(&uma_drain_lock, SA_XLOCKED);
3173 bucket_enable();
3174 zone_foreach(zone_drain);
3175 if (vm_page_count_min() || kmem_danger) {
3176 cache_drain_safe(NULL);
3177 zone_foreach(zone_drain);
3178 }
3179 /*
3180 	 * This zone is visited early in the pass, but draining the other
3181 	 * zones frees slabs back into it, so visit it again here to free
3182 	 * pages that have since become empty.  We have to do the same for buckets.
3183 */
3184 zone_drain(slabzone);
3263 zone_drain(slabrefzone);
3264 bucket_zone_drain();
3265}
3266
3267void
3268uma_reclaim(void)
3269{
3270
3271 sx_xlock(&uma_drain_lock);
3272 uma_reclaim_locked(false);
3273 sx_xunlock(&uma_drain_lock);
3274}
3275
3276static int uma_reclaim_needed;
3277
3278void
3279uma_reclaim_wakeup(void)
3280{
3281
3282 uma_reclaim_needed = 1;
3283 wakeup(&uma_reclaim_needed);
3284}
3285
3286void
3287uma_reclaim_worker(void *arg __unused)
3288{
3289
3290 sx_xlock(&uma_drain_lock);
3291 for (;;) {
3292 sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
3293 "umarcl", 0);
3294 if (uma_reclaim_needed) {
3295 uma_reclaim_needed = 0;
3296 uma_reclaim_locked(true);
3297 }
3298 }
3299}
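
The worker above only sleeps on uma_reclaim_needed; other code pokes it with uma_reclaim_wakeup() when memory is tight. A minimal sketch of such a caller; the low-memory test shown is an assumption for illustration, not how the page daemon actually decides:

static void
maybe_kick_uma(void)
{
	if (vm_page_count_min())
		uma_reclaim_wakeup();	/* wakes uma_reclaim_worker() above */
}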
3300
3301/* See uma.h */
3302int
3303uma_zone_exhausted(uma_zone_t zone)
3304{
3305 int full;
3306
3307 ZONE_LOCK(zone);
3308 full = (zone->uz_flags & UMA_ZFLAG_FULL);
3309 ZONE_UNLOCK(zone);
3310 return (full);
3311}
3312
3313int
3314uma_zone_exhausted_nolock(uma_zone_t zone)
3315{
3316 return (zone->uz_flags & UMA_ZFLAG_FULL);
3317}
3318
3319void *
3320uma_large_malloc(vm_size_t size, int wait)
3321{
3322 void *mem;
3323 uma_slab_t slab;
3324 uint8_t flags;
3325
3326 slab = zone_alloc_item(slabzone, NULL, wait);
3327 if (slab == NULL)
3328 return (NULL);
3329 mem = page_alloc(NULL, size, &flags, wait);
3330 if (mem) {
3331 vsetslab((vm_offset_t)mem, slab);
3332 slab->us_data = mem;
3333 slab->us_flags = flags | UMA_SLAB_MALLOC;
3334 slab->us_size = size;
3335 } else {
3336 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3337 }
3338
3339 return (mem);
3340}
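
uma_large_malloc() and uma_large_free() back the large-allocation side of malloc(9); the slab header taken from slabzone is what carries the size and flags back to the free path. A hedged sketch of a matched pair of calls (normally only the malloc(9) internals issue these):

static void *
big_buf_alloc(void)
{
	return (uma_large_malloc(8 * PAGE_SIZE, M_WAITOK));
}

static void
big_buf_free(void *p)
{
	/* Recover the slab header that vsetslab() recorded above. */
	uma_large_free(vtoslab((vm_offset_t)p & ~UMA_SLAB_MASK));
}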
3341
3342void
3343uma_large_free(uma_slab_t slab)
3344{
3345
3346 page_free(slab->us_data, slab->us_size, slab->us_flags);
3347 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3348}
3349
3350static void
3351uma_zero_item(void *item, uma_zone_t zone)
3352{
3353
3354 if (zone->uz_flags & UMA_ZONE_PCPU) {
3355 for (int i = 0; i < mp_ncpus; i++)
3356 bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3357 } else
3358 bzero(item, zone->uz_size);
3359}
3360
3361void
3362uma_print_stats(void)
3363{
3364 zone_foreach(uma_print_zone);
3365}
3366
3367static void
3368slab_print(uma_slab_t slab)
3369{
3370 printf("slab: keg %p, data %p, freecount %d\n",
3371 slab->us_keg, slab->us_data, slab->us_freecount);
3372}
3373
3374static void
3375cache_print(uma_cache_t cache)
3376{
3377 printf("alloc: %p(%d), free: %p(%d)\n",
3378 cache->uc_allocbucket,
3379 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3380 cache->uc_freebucket,
3381 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3382}
3383
3384static void
3385uma_print_keg(uma_keg_t keg)
3386{
3387 uma_slab_t slab;
3388
3389 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3390 "out %d free %d limit %d\n",
3391 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3392 keg->uk_ipers, keg->uk_ppera,
3393 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3394 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3395 printf("Part slabs:\n");
3396 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3397 slab_print(slab);
3398 printf("Free slabs:\n");
3399 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3400 slab_print(slab);
3401 printf("Full slabs:\n");
3402 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3403 slab_print(slab);
3404}
3405
3406void
3407uma_print_zone(uma_zone_t zone)
3408{
3409 uma_cache_t cache;
3410 uma_klink_t kl;
3411 int i;
3412
3413 printf("zone: %s(%p) size %d flags %#x\n",
3414 zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3415 LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3416 uma_print_keg(kl->kl_keg);
3417 CPU_FOREACH(i) {
3418 cache = &zone->uz_cpu[i];
3419 printf("CPU %d Cache:\n", i);
3420 cache_print(cache);
3421 }
3422}
3423
3424#ifdef DDB
3425/*
3426 * Generate statistics across both the zone and its per-cpu caches.  Return
3427 * each requested statistic through its pointer argument when that pointer is non-NULL.
3428 *
3429 * Note: does not update the zone statistics, as it can't safely clear the
3430 * per-CPU cache statistic.
3431 *
3432 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3433 * safe from off-CPU; we should modify the caches to track this information
3434 * directly so that we don't have to.
3435 */
3436static void
3437uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3438 uint64_t *freesp, uint64_t *sleepsp)
3439{
3440 uma_cache_t cache;
3441 uint64_t allocs, frees, sleeps;
3442 int cachefree, cpu;
3443
3444 allocs = frees = sleeps = 0;
3445 cachefree = 0;
3446 CPU_FOREACH(cpu) {
3447 cache = &z->uz_cpu[cpu];
3448 if (cache->uc_allocbucket != NULL)
3449 cachefree += cache->uc_allocbucket->ub_cnt;
3450 if (cache->uc_freebucket != NULL)
3451 cachefree += cache->uc_freebucket->ub_cnt;
3452 allocs += cache->uc_allocs;
3453 frees += cache->uc_frees;
3454 }
3455 allocs += z->uz_allocs;
3456 frees += z->uz_frees;
3457 sleeps += z->uz_sleeps;
3458 if (cachefreep != NULL)
3459 *cachefreep = cachefree;
3460 if (allocsp != NULL)
3461 *allocsp = allocs;
3462 if (freesp != NULL)
3463 *freesp = frees;
3464 if (sleepsp != NULL)
3465 *sleepsp = sleeps;
3466}
3467#endif /* DDB */
3468
3469static int
3470sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3471{
3472 uma_keg_t kz;
3473 uma_zone_t z;
3474 int count;
3475
3476 count = 0;
3477 rw_rlock(&uma_rwlock);
3478 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3479 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3480 count++;
3481 }
3482 rw_runlock(&uma_rwlock);
3483 return (sysctl_handle_int(oidp, &count, 0, req));
3484}
3485
3486static int
3487sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3488{
3489 struct uma_stream_header ush;
3490 struct uma_type_header uth;
3491 struct uma_percpu_stat ups;
3492 uma_bucket_t bucket;
3493 struct sbuf sbuf;
3494 uma_cache_t cache;
3495 uma_klink_t kl;
3496 uma_keg_t kz;
3497 uma_zone_t z;
3498 uma_keg_t k;
3499 int count, error, i;
3500
3501 error = sysctl_wire_old_buffer(req, 0);
3502 if (error != 0)
3503 return (error);
3504 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3505 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3506
3507 count = 0;
3508 rw_rlock(&uma_rwlock);
3509 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3510 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3511 count++;
3512 }
3513
3514 /*
3515 * Insert stream header.
3516 */
3517 bzero(&ush, sizeof(ush));
3518 ush.ush_version = UMA_STREAM_VERSION;
3519 ush.ush_maxcpus = (mp_maxid + 1);
3520 ush.ush_count = count;
3521 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3522
3523 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3524 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3525 bzero(&uth, sizeof(uth));
3526 ZONE_LOCK(z);
3527 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3528 uth.uth_align = kz->uk_align;
3529 uth.uth_size = kz->uk_size;
3530 uth.uth_rsize = kz->uk_rsize;
3531 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3532 k = kl->kl_keg;
3533 uth.uth_maxpages += k->uk_maxpages;
3534 uth.uth_pages += k->uk_pages;
3535 uth.uth_keg_free += k->uk_free;
3536 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3537 * k->uk_ipers;
3538 }
3539
3540 /*
3541 			 * A zone is secondary if it is not the first entry
3542 * on the keg's zone list.
3543 */
3544 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3545 (LIST_FIRST(&kz->uk_zones) != z))
3546 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3547
3548 LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3549 uth.uth_zone_free += bucket->ub_cnt;
3550 uth.uth_allocs = z->uz_allocs;
3551 uth.uth_frees = z->uz_frees;
3552 uth.uth_fails = z->uz_fails;
3553 uth.uth_sleeps = z->uz_sleeps;
3554 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3555 /*
3556 			 * While it is not normally safe to access the cache
3557 			 * bucket pointers from off the CPU that owns the
3558 			 * cache, without the zone lock held those pointers
3559 			 * can only be exchanged, never invalidated, so we
3560 			 * accept the possible race associated with a bucket
3561 			 * exchange during monitoring.
3562 */
3563 for (i = 0; i < (mp_maxid + 1); i++) {
3564 bzero(&ups, sizeof(ups));
3565 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3566 goto skip;
3567 if (CPU_ABSENT(i))
3568 goto skip;
3569 cache = &z->uz_cpu[i];
3570 if (cache->uc_allocbucket != NULL)
3571 ups.ups_cache_free +=
3572 cache->uc_allocbucket->ub_cnt;
3573 if (cache->uc_freebucket != NULL)
3574 ups.ups_cache_free +=
3575 cache->uc_freebucket->ub_cnt;
3576 ups.ups_allocs = cache->uc_allocs;
3577 ups.ups_frees = cache->uc_frees;
3578skip:
3579 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3580 }
3581 ZONE_UNLOCK(z);
3582 }
3583 }
3584 rw_runlock(&uma_rwlock);
3585 error = sbuf_finish(&sbuf);
3586 sbuf_delete(&sbuf);
3587 return (error);
3588}
3589
3590int
3591sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3592{
3593 uma_zone_t zone = *(uma_zone_t *)arg1;
3594 int error, max;
3595
3596 max = uma_zone_get_max(zone);
3597 error = sysctl_handle_int(oidp, &max, 0, req);
3598 if (error || !req->newptr)
3599 return (error);
3600
3601 uma_zone_set_max(zone, max);
3602
3603 return (0);
3604}
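
This handler expects arg1 to point at a uma_zone_t variable (note the extra indirection in the cast above), so a caller registers it by passing the address of its zone pointer. A hypothetical registration; the parent node, oid name and description are invented:

#include <sys/sysctl.h>

SYSCTL_DECL(_vm);
SYSCTL_PROC(_vm, OID_AUTO, foo_zone_max, CTLTYPE_INT | CTLFLAG_RW,
    &foo_zone, 0, sysctl_handle_uma_zone_max, "I",
    "Maximum number of items in the hypothetical foo zone");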
3605
3606int
3607sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3608{
3609 uma_zone_t zone = *(uma_zone_t *)arg1;
3610 int cur;
3611
3612 cur = uma_zone_get_cur(zone);
3613 return (sysctl_handle_int(oidp, &cur, 0, req));
3614}
3615
3616#ifdef INVARIANTS
3617static uma_slab_t
3618uma_dbg_getslab(uma_zone_t zone, void *item)
3619{
3620 uma_slab_t slab;
3621 uma_keg_t keg;
3622 uint8_t *mem;
3623
3624 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3625 if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
3626 slab = vtoslab((vm_offset_t)mem);
3627 } else {
3628 /*
3629 * It is safe to return the slab here even though the
3630 * zone is unlocked because the item's allocation state
3631 * essentially holds a reference.
3632 */
3633 ZONE_LOCK(zone);
3634 keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
3635 if (keg->uk_flags & UMA_ZONE_HASH)
3636 slab = hash_sfind(&keg->uk_hash, mem);
3637 else
3638 slab = (uma_slab_t)(mem + keg->uk_pgoff);
3639 ZONE_UNLOCK(zone);
3640 }
3641
3642 return (slab);
3643}
3644
3645/*
3646 * Set up the slab's freei data such that uma_dbg_free can function.
3647 *
3648 */
3649static void
3650uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
3651{
3652 uma_keg_t keg;
3653 int freei;
3654
3655 if (zone_first_keg(zone) == NULL)
3656 return;
3657 if (slab == NULL) {
3658 slab = uma_dbg_getslab(zone, item);
3659 if (slab == NULL)
3660 panic("uma: item %p did not belong to zone %s\n",
3661 item, zone->uz_name);
3662 }
3663 keg = slab->us_keg;
3664 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3665
3666 if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3667 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
3668 item, zone, zone->uz_name, slab, freei);
3669 BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3670
3671 return;
3672}
3673
3674/*
3675 * Verifies freed addresses. Checks for alignment, valid slab membership
3676 * and duplicate frees.
3677 *
3678 */
3679static void
3680uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
3681{
3682 uma_keg_t keg;
3683 int freei;
3684
3685 if (zone_first_keg(zone) == NULL)
3686 return;
3687 if (slab == NULL) {
3688 slab = uma_dbg_getslab(zone, item);
3689 if (slab == NULL)
3690 panic("uma: Freed item %p did not belong to zone %s\n",
3691 item, zone->uz_name);
3692 }
3693 keg = slab->us_keg;
3694 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3695
3696 if (freei >= keg->uk_ipers)
3697 panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
3698 item, zone, zone->uz_name, slab, freei);
3699
3700 if (((freei * keg->uk_rsize) + slab->us_data) != item)
3701 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
3702 item, zone, zone->uz_name, slab, freei);
3703
3704 if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3705 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
3706 item, zone, zone->uz_name, slab, freei);
3707
3708 BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3709}
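
The alignment check above catches frees of pointers that do not sit exactly on an item boundary. With invented numbers: if uk_rsize is 256 and a caller frees us_data + 1300, then freei = 1300 / 256 = 5, but freei * 256 places the expected item at us_data + 1280, which is not the freed address, so the "Unaligned free" panic fires. The same test as a standalone sketch:

static int
is_aligned_item(uintptr_t us_data, uintptr_t addr, unsigned int rsize)
{
	unsigned int freei;

	freei = (addr - us_data) / rsize;			/* 1300 / 256 == 5 */
	return (us_data + (uintptr_t)freei * rsize == addr);	/* false for 1300 */
}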
3710#endif /* INVARIANTS */
3711
3712#ifdef DDB
3713DB_SHOW_COMMAND(uma, db_show_uma)
3714{
3715 uint64_t allocs, frees, sleeps;
3716 uma_bucket_t bucket;
3717 uma_keg_t kz;
3718 uma_zone_t z;
3719 int cachefree;
3720
3721 db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3722 "Free", "Requests", "Sleeps", "Bucket");
3723 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3724 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3725 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3726 allocs = z->uz_allocs;
3727 frees = z->uz_frees;
3728 sleeps = z->uz_sleeps;
3729 cachefree = 0;
3730 } else
3731 uma_zone_sumstat(z, &cachefree, &allocs,
3732 &frees, &sleeps);
3733 if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3734 (LIST_FIRST(&kz->uk_zones) != z)))
3735 cachefree += kz->uk_free;
3736 LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3737 cachefree += bucket->ub_cnt;
3738 db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3739 z->uz_name, (uintmax_t)kz->uk_size,
3740 (intmax_t)(allocs - frees), cachefree,
3741 (uintmax_t)allocs, sleeps, z->uz_count);
3742 if (db_pager_quit)
3743 return;
3744 }
3745 }
3746}
3747
3748DB_SHOW_COMMAND(umacache, db_show_umacache)
3749{
3750 uint64_t allocs, frees;
3751 uma_bucket_t bucket;
3752 uma_zone_t z;
3753 int cachefree;
3754
3755 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3756 "Requests", "Bucket");
3757 LIST_FOREACH(z, &uma_cachezones, uz_link) {
3758 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3759 LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3760 cachefree += bucket->ub_cnt;
3761 db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3762 z->uz_name, (uintmax_t)z->uz_size,
3763 (intmax_t)(allocs - frees), cachefree,
3764 (uintmax_t)allocs, z->uz_count);
3765 if (db_pager_quit)
3766 return;
3767 }
3768}
3769#endif /* DDB */