uma_core.c (222184) → uma_core.c (226313)
1/*-
2 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * uma_core.c Implementation of the Universal Memory allocator
31 *
32 * This allocator is intended to replace the multitude of similar object caches
33 * in the standard FreeBSD kernel. The intent is to be flexible as well as
34 * efficient. A primary design goal is to return unused memory to the rest of
35 * the system. This will make the system as a whole more flexible due to the
36 * ability to move memory to subsystems which most need it instead of leaving
37 * pools of reserved memory unused.
38 *
39 * The basic ideas stem from similar slab/zone based allocators whose algorithms
40 * are well known.
41 *
42 */
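/*
 * For orientation, a minimal sketch of how a kernel consumer drives the
 * public interface implemented here; "struct foo" and "foo_zone" are
 * hypothetical names and uma.h carries the authoritative prototypes:
 *
 *	static uma_zone_t foo_zone;
 *	struct foo *fp;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	fp = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 */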
43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 222184 2011-05-22 17:46:16Z alc $");
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 226313 2011-10-12 18:08:28Z glebius $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59
60#include "opt_ddb.h"
61#include "opt_param.h"
62#include "opt_vm.h"
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/kernel.h>
67#include <sys/types.h>
68#include <sys/queue.h>
69#include <sys/malloc.h>
70#include <sys/ktr.h>
71#include <sys/lock.h>
72#include <sys/sysctl.h>
73#include <sys/mutex.h>
74#include <sys/proc.h>
75#include <sys/sbuf.h>
76#include <sys/smp.h>
77#include <sys/vmmeter.h>
78
79#include <vm/vm.h>
80#include <vm/vm_object.h>
81#include <vm/vm_page.h>
82#include <vm/vm_param.h>
83#include <vm/vm_map.h>
84#include <vm/vm_kern.h>
85#include <vm/vm_extern.h>
86#include <vm/uma.h>
87#include <vm/uma_int.h>
88#include <vm/uma_dbg.h>
89
90#include <ddb/ddb.h>
91
92#ifdef DEBUG_MEMGUARD
93#include <vm/memguard.h>
94#endif
95
91/*
92 * This is the zone and keg from which all zones are spawned. The idea is that
93 * even the zone & keg heads are allocated from the allocator, so we use the
94 * bss section to bootstrap us.
95 */
96static struct uma_keg masterkeg;
97static struct uma_zone masterzone_k;
98static struct uma_zone masterzone_z;
99static uma_zone_t kegs = &masterzone_k;
100static uma_zone_t zones = &masterzone_z;
101
102/* This is the zone from which all of uma_slab_t's are allocated. */
103static uma_zone_t slabzone;
104static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
105
106/*
107 * The initial hash tables come out of this zone so they can be allocated
108 * prior to malloc coming up.
109 */
110static uma_zone_t hashzone;
111
112/* The boot-time adjusted value for cache line alignment. */
113int uma_align_cache = 64 - 1;
114
115static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
116
117/*
118 * Are we allowed to allocate buckets?
119 */
120static int bucketdisable = 1;
121
122/* Linked list of all kegs in the system */
123static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
124
125/* This mutex protects the keg list */
126static struct mtx uma_mtx;
127
128/* Linked list of boot time pages */
129static LIST_HEAD(,uma_slab) uma_boot_pages =
130 LIST_HEAD_INITIALIZER(uma_boot_pages);
131
132/* This mutex protects the boot time pages list */
133static struct mtx uma_boot_pages_mtx;
134
135/* Is the VM done starting up? */
136static int booted = 0;
137#define UMA_STARTUP 1
138#define UMA_STARTUP2 2
139
140/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
141static u_int uma_max_ipers;
142static u_int uma_max_ipers_ref;
143
144/*
145 * This is the handle used to schedule events that need to happen
146 * outside of the allocation fast path.
147 */
148static struct callout uma_callout;
149#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
150
151/*
152 * This structure is passed as the zone ctor arg so that I don't have to create
153 * a special allocation function just for zones.
154 */
155struct uma_zctor_args {
156 char *name;
157 size_t size;
158 uma_ctor ctor;
159 uma_dtor dtor;
160 uma_init uminit;
161 uma_fini fini;
162 uma_keg_t keg;
163 int align;
164 u_int32_t flags;
165};
166
167struct uma_kctor_args {
168 uma_zone_t zone;
169 size_t size;
170 uma_init uminit;
171 uma_fini fini;
172 int align;
173 u_int32_t flags;
174};
175
176struct uma_bucket_zone {
177 uma_zone_t ubz_zone;
178 char *ubz_name;
179 int ubz_entries;
180};
181
182#define BUCKET_MAX 128
183
184struct uma_bucket_zone bucket_zones[] = {
185 { NULL, "16 Bucket", 16 },
186 { NULL, "32 Bucket", 32 },
187 { NULL, "64 Bucket", 64 },
188 { NULL, "128 Bucket", 128 },
189 { NULL, NULL, 0}
190};
191
192#define BUCKET_SHIFT 4
193#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
194
195/*
196 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
197 * of approximately the right size.
198 */
199static uint8_t bucket_size[BUCKET_ZONES];
200
201/*
202 * Flags and enumerations to be passed to internal functions.
203 */
204enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
205
206#define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
207#define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
208
209/* Prototypes.. */
210
211static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
212static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
213static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
214static void page_free(void *, int, u_int8_t);
215static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
216static void cache_drain(uma_zone_t);
217static void bucket_drain(uma_zone_t, uma_bucket_t);
218static void bucket_cache_drain(uma_zone_t zone);
219static int keg_ctor(void *, int, void *, int);
220static void keg_dtor(void *, int, void *);
221static int zone_ctor(void *, int, void *, int);
222static void zone_dtor(void *, int, void *);
223static int zero_init(void *, int, int);
224static void keg_small_init(uma_keg_t keg);
225static void keg_large_init(uma_keg_t keg);
226static void zone_foreach(void (*zfunc)(uma_zone_t));
227static void zone_timeout(uma_zone_t zone);
228static int hash_alloc(struct uma_hash *);
229static int hash_expand(struct uma_hash *, struct uma_hash *);
230static void hash_free(struct uma_hash *hash);
231static void uma_timeout(void *);
232static void uma_startup3(void);
233static void *zone_alloc_item(uma_zone_t, void *, int);
234static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
235 int);
236static void bucket_enable(void);
237static void bucket_init(void);
238static uma_bucket_t bucket_alloc(int, int);
239static void bucket_free(uma_bucket_t);
240static void bucket_zone_drain(void);
241static int zone_alloc_bucket(uma_zone_t zone, int flags);
242static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
243static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
244static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
245static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
246 uma_fini fini, int align, u_int32_t flags);
247static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
248static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
249
250void uma_print_zone(uma_zone_t);
251void uma_print_stats(void);
252static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
253static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
254
255SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
256
257SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
258 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
259
260SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
261 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
262
263/*
264 * This routine checks to see whether or not it's safe to enable buckets.
265 */
266
267static void
268bucket_enable(void)
269{
270 if (cnt.v_free_count < cnt.v_free_min)
271 bucketdisable = 1;
272 else
273 bucketdisable = 0;
274}
275
276/*
277 * Initialize bucket_zones, the array of zones of buckets of various sizes.
278 *
279 * For each zone, calculate the memory required for each bucket, consisting
280 * of the header and an array of pointers. Initialize bucket_size[] to point
281 * the range of appropriate bucket sizes at the zone.
282 */
283static void
284bucket_init(void)
285{
286 struct uma_bucket_zone *ubz;
287 int i;
288 int j;
289
290 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
291 int size;
292
293 ubz = &bucket_zones[j];
294 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
295 size += sizeof(void *) * ubz->ubz_entries;
296 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
297 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
298 UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
299 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
300 bucket_size[i >> BUCKET_SHIFT] = j;
301 }
302}
303
304/*
305 * Given a desired number of entries for a bucket, return the zone from which
306 * to allocate the bucket.
307 */
308static struct uma_bucket_zone *
309bucket_zone_lookup(int entries)
310{
311 int idx;
312
313 idx = howmany(entries, 1 << BUCKET_SHIFT);
314 return (&bucket_zones[bucket_size[idx]]);
315}
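/*
 * Worked example of the lookup above, using the bucket_zones[] table and
 * BUCKET_SHIFT of 4 defined earlier: bucket_init() leaves bucket_size[]
 * as { 0, 0, 1, 2, 2, 3, 3, 3, 3 }, so bucket_zone_lookup(20) computes
 * idx = howmany(20, 16) = 2 and returns the "32 Bucket" zone, while
 * bucket_zone_lookup(48) maps through bucket_size[3] = 2 to "64 Bucket".
 */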
316
317static uma_bucket_t
318bucket_alloc(int entries, int bflags)
319{
320 struct uma_bucket_zone *ubz;
321 uma_bucket_t bucket;
322
323 /*
324 * This is to stop us from allocating per cpu buckets while we're
325 * running out of vm.boot_pages. Otherwise, we would exhaust the
326 * boot pages. This also prevents us from allocating buckets in
327 * low memory situations.
328 */
329 if (bucketdisable)
330 return (NULL);
331
332 ubz = bucket_zone_lookup(entries);
333 bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
334 if (bucket) {
335#ifdef INVARIANTS
336 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
337#endif
338 bucket->ub_cnt = 0;
339 bucket->ub_entries = ubz->ubz_entries;
340 }
341
342 return (bucket);
343}
344
345static void
346bucket_free(uma_bucket_t bucket)
347{
348 struct uma_bucket_zone *ubz;
349
350 ubz = bucket_zone_lookup(bucket->ub_entries);
351 zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
352 ZFREE_STATFREE);
353}
354
355static void
356bucket_zone_drain(void)
357{
358 struct uma_bucket_zone *ubz;
359
360 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
361 zone_drain(ubz->ubz_zone);
362}
363
364static inline uma_keg_t
365zone_first_keg(uma_zone_t zone)
366{
367
368 return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
369}
370
371static void
372zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
373{
374 uma_klink_t klink;
375
376 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
377 kegfn(klink->kl_keg);
378}
379
380/*
381 * Routine called by timeout which is used to fire off some time interval
382 * based calculations. (stats, hash size, etc.)
383 *
384 * Arguments:
385 * arg Unused
386 *
387 * Returns:
388 * Nothing
389 */
390static void
391uma_timeout(void *unused)
392{
393 bucket_enable();
394 zone_foreach(zone_timeout);
395
396 /* Reschedule this event */
397 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
398}
399
400/*
401 * Routine to perform timeout driven calculations. This expands the
402 * hashes and does per cpu statistics aggregation.
403 *
404 * Returns nothing.
405 */
406static void
407keg_timeout(uma_keg_t keg)
408{
409
410 KEG_LOCK(keg);
411 /*
412 * Expand the keg hash table.
413 *
414 * This is done if the number of slabs is larger than the hash size.
415 * What I'm trying to do here is completely eliminate collisions. This
416 * may be a little aggressive. Should I allow for two collisions max?
417 */
418 if (keg->uk_flags & UMA_ZONE_HASH &&
419 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
420 struct uma_hash newhash;
421 struct uma_hash oldhash;
422 int ret;
423
424 /*
425 * This is so involved because allocating and freeing
426 * while the keg lock is held will lead to deadlock.
427 * I have to do everything in stages and check for
428 * races.
429 */
430 newhash = keg->uk_hash;
431 KEG_UNLOCK(keg);
432 ret = hash_alloc(&newhash);
433 KEG_LOCK(keg);
434 if (ret) {
435 if (hash_expand(&keg->uk_hash, &newhash)) {
436 oldhash = keg->uk_hash;
437 keg->uk_hash = newhash;
438 } else
439 oldhash = newhash;
440
441 KEG_UNLOCK(keg);
442 hash_free(&oldhash);
443 KEG_LOCK(keg);
444 }
445 }
446 KEG_UNLOCK(keg);
447}
448
449static void
450zone_timeout(uma_zone_t zone)
451{
452
453 zone_foreach_keg(zone, &keg_timeout);
454}
455
456/*
457 * Allocate and zero fill the next sized hash table from the appropriate
458 * backing store.
459 *
460 * Arguments:
461 * hash A new hash structure with the old hash size in uh_hashsize
462 *
463 * Returns:
464 * 1 on success and 0 on failure.
465 */
466static int
467hash_alloc(struct uma_hash *hash)
468{
469 int oldsize;
470 int alloc;
471
472 oldsize = hash->uh_hashsize;
473
474 /* We're just going to go to a power of two greater */
475 if (oldsize) {
476 hash->uh_hashsize = oldsize * 2;
477 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
478 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
479 M_UMAHASH, M_NOWAIT);
480 } else {
481 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
482 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
483 M_WAITOK);
484 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
485 }
486 if (hash->uh_slab_hash) {
487 bzero(hash->uh_slab_hash, alloc);
488 hash->uh_hashmask = hash->uh_hashsize - 1;
489 return (1);
490 }
491
492 return (0);
493}
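/*
 * For example, if hash->uh_hashsize comes in as 32, the code above asks
 * M_UMAHASH for a 64-entry table (alloc = 64 * sizeof(struct slabhead)),
 * zeroes it and sets uh_hashmask to 63; a zero uh_hashsize instead takes
 * the bootstrap path through hashzone at UMA_HASH_SIZE_INIT entries.
 */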
494
495/*
496 * Expands the hash table for HASH zones. This is done from zone_timeout
497 * to reduce collisions. This must not be done in the regular allocation
498 * path, otherwise, we can recurse on the vm while allocating pages.
499 *
500 * Arguments:
501 * oldhash The hash you want to expand
502 * newhash The hash structure for the new table
503 *
504 * Returns:
505 * Nothing
506 *
507 * Discussion:
508 */
509static int
510hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
511{
512 uma_slab_t slab;
513 int hval;
514 int i;
515
516 if (!newhash->uh_slab_hash)
517 return (0);
518
519 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
520 return (0);
521
522 /*
523 * I need to investigate hash algorithms for resizing without a
524 * full rehash.
525 */
526
527 for (i = 0; i < oldhash->uh_hashsize; i++)
528 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
529 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
530 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
531 hval = UMA_HASH(newhash, slab->us_data);
532 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
533 slab, us_hlink);
534 }
535
536 return (1);
537}
538
539/*
540 * Free the hash bucket to the appropriate backing store.
541 *
542 * Arguments:
543 * slab_hash The hash bucket we're freeing
544 * hashsize The number of entries in that hash bucket
545 *
546 * Returns:
547 * Nothing
548 */
549static void
550hash_free(struct uma_hash *hash)
551{
552 if (hash->uh_slab_hash == NULL)
553 return;
554 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
555 zone_free_item(hashzone,
556 hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
557 else
558 free(hash->uh_slab_hash, M_UMAHASH);
559}
560
561/*
562 * Frees all outstanding items in a bucket
563 *
564 * Arguments:
565 * zone The zone to free to, must be unlocked.
566 * bucket The free/alloc bucket with items, cpu queue must be locked.
567 *
568 * Returns:
569 * Nothing
570 */
571
572static void
573bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
574{
575 void *item;
576
577 if (bucket == NULL)
578 return;
579
580 while (bucket->ub_cnt > 0) {
581 bucket->ub_cnt--;
582 item = bucket->ub_bucket[bucket->ub_cnt];
583#ifdef INVARIANTS
584 bucket->ub_bucket[bucket->ub_cnt] = NULL;
585 KASSERT(item != NULL,
586 ("bucket_drain: botched ptr, item is NULL"));
587#endif
588 zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
589 }
590}
591
592/*
593 * Drains the per cpu caches for a zone.
594 *
595 * NOTE: This may only be called while the zone is being torn down, and not
596 * during normal operation. This is necessary in order that we do not have
597 * to migrate CPUs to drain the per-CPU caches.
598 *
599 * Arguments:
600 * zone The zone to drain, must be unlocked.
601 *
602 * Returns:
603 * Nothing
604 */
605static void
606cache_drain(uma_zone_t zone)
607{
608 uma_cache_t cache;
609 int cpu;
610
611 /*
612 * XXX: It is safe to not lock the per-CPU caches, because we're
613 * tearing down the zone anyway. I.e., there will be no further use
614 * of the caches at this point.
615 *
616 * XXX: It would be good to be able to assert that the zone is being
617 * torn down to prevent improper use of cache_drain().
618 *
619 * XXX: We lock the zone before passing into bucket_cache_drain() as
620 * it is used elsewhere. Should the tear-down path be made special
621 * there in some form?
622 */
623 CPU_FOREACH(cpu) {
624 cache = &zone->uz_cpu[cpu];
625 bucket_drain(zone, cache->uc_allocbucket);
626 bucket_drain(zone, cache->uc_freebucket);
627 if (cache->uc_allocbucket != NULL)
628 bucket_free(cache->uc_allocbucket);
629 if (cache->uc_freebucket != NULL)
630 bucket_free(cache->uc_freebucket);
631 cache->uc_allocbucket = cache->uc_freebucket = NULL;
632 }
633 ZONE_LOCK(zone);
634 bucket_cache_drain(zone);
635 ZONE_UNLOCK(zone);
636}
637
638/*
639 * Drain the cached buckets from a zone. Expects a locked zone on entry.
640 */
641static void
642bucket_cache_drain(uma_zone_t zone)
643{
644 uma_bucket_t bucket;
645
646 /*
647 * Drain the bucket queues and free the buckets, we just keep two per
648 * cpu (alloc/free).
649 */
650 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
651 LIST_REMOVE(bucket, ub_link);
652 ZONE_UNLOCK(zone);
653 bucket_drain(zone, bucket);
654 bucket_free(bucket);
655 ZONE_LOCK(zone);
656 }
657
658 /* Now we do the free queue.. */
659 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
660 LIST_REMOVE(bucket, ub_link);
661 bucket_free(bucket);
662 }
663}
664
665/*
666 * Frees pages from a keg back to the system. This is done on demand from
667 * the pageout daemon.
668 *
669 * Returns nothing.
670 */
671static void
672keg_drain(uma_keg_t keg)
673{
674 struct slabhead freeslabs = { 0 };
675 uma_slab_t slab;
676 uma_slab_t n;
677 u_int8_t flags;
678 u_int8_t *mem;
679 int i;
680
681 /*
682 * We don't want to take pages from statically allocated kegs at this
683 * time
684 */
685 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
686 return;
687
688#ifdef UMA_DEBUG
689 printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
690#endif
691 KEG_LOCK(keg);
692 if (keg->uk_free == 0)
693 goto finished;
694
695 slab = LIST_FIRST(&keg->uk_free_slab);
696 while (slab) {
697 n = LIST_NEXT(slab, us_link);
698
699 /* We have nowhere to free these to */
700 if (slab->us_flags & UMA_SLAB_BOOT) {
701 slab = n;
702 continue;
703 }
704
705 LIST_REMOVE(slab, us_link);
706 keg->uk_pages -= keg->uk_ppera;
707 keg->uk_free -= keg->uk_ipers;
708
709 if (keg->uk_flags & UMA_ZONE_HASH)
710 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
711
712 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
713
714 slab = n;
715 }
716finished:
717 KEG_UNLOCK(keg);
718
719 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
720 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
721 if (keg->uk_fini)
722 for (i = 0; i < keg->uk_ipers; i++)
723 keg->uk_fini(
724 slab->us_data + (keg->uk_rsize * i),
725 keg->uk_size);
726 flags = slab->us_flags;
727 mem = slab->us_data;
728
729 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
730 vm_object_t obj;
731
732 if (flags & UMA_SLAB_KMEM)
733 obj = kmem_object;
734 else if (flags & UMA_SLAB_KERNEL)
735 obj = kernel_object;
736 else
737 obj = NULL;
738 for (i = 0; i < keg->uk_ppera; i++)
739 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
740 obj);
741 }
742 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
743 zone_free_item(keg->uk_slabzone, slab, NULL,
744 SKIP_NONE, ZFREE_STATFREE);
745#ifdef UMA_DEBUG
746 printf("%s: Returning %d bytes.\n",
747 keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
748#endif
749 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
750 }
751}
752
753static void
754zone_drain_wait(uma_zone_t zone, int waitok)
755{
756
757 /*
758 * Set draining to interlock with zone_dtor() so we can release our
759 * locks as we go. Only dtor() should do a WAITOK call since it
760 * is the only call that knows the structure will still be available
761 * when it wakes up.
762 */
763 ZONE_LOCK(zone);
764 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
765 if (waitok == M_NOWAIT)
766 goto out;
767 mtx_unlock(&uma_mtx);
768 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
769 mtx_lock(&uma_mtx);
770 }
771 zone->uz_flags |= UMA_ZFLAG_DRAINING;
772 bucket_cache_drain(zone);
773 ZONE_UNLOCK(zone);
774 /*
775 * The DRAINING flag protects us from being freed while
776 * we're running. Normally the uma_mtx would protect us but we
777 * must be able to release and acquire the right lock for each keg.
778 */
779 zone_foreach_keg(zone, &keg_drain);
780 ZONE_LOCK(zone);
781 zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
782 wakeup(zone);
783out:
784 ZONE_UNLOCK(zone);
785}
786
787void
788zone_drain(uma_zone_t zone)
789{
790
791 zone_drain_wait(zone, M_NOWAIT);
792}
793
794/*
795 * Allocate a new slab for a keg. This does not insert the slab onto a list.
796 *
797 * Arguments:
798 * wait Shall we wait?
799 *
800 * Returns:
801 * The slab that was allocated or NULL if there is no memory and the
802 * caller specified M_NOWAIT.
803 */
804static uma_slab_t
805keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
806{
807 uma_slabrefcnt_t slabref;
808 uma_alloc allocf;
809 uma_slab_t slab;
810 u_int8_t *mem;
811 u_int8_t flags;
812 int i;
813
814 mtx_assert(&keg->uk_lock, MA_OWNED);
815 slab = NULL;
816
817#ifdef UMA_DEBUG
818 printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
819#endif
820 allocf = keg->uk_allocf;
821 KEG_UNLOCK(keg);
822
823 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
824 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
825 if (slab == NULL) {
826 KEG_LOCK(keg);
827 return NULL;
828 }
829 }
830
831 /*
832 * This reproduces the old vm_zone behavior of zero filling pages the
833 * first time they are added to a zone.
834 *
835 * Malloced items are zeroed in uma_zalloc.
836 */
837
838 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
839 wait |= M_ZERO;
840 else
841 wait &= ~M_ZERO;
842
843 /* zone is passed for legacy reasons. */
844 mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
845 if (mem == NULL) {
846 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
847 zone_free_item(keg->uk_slabzone, slab, NULL,
848 SKIP_NONE, ZFREE_STATFREE);
849 KEG_LOCK(keg);
850 return (NULL);
851 }
852
853 /* Point the slab into the allocated memory */
854 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
855 slab = (uma_slab_t )(mem + keg->uk_pgoff);
856
857 if (keg->uk_flags & UMA_ZONE_VTOSLAB)
858 for (i = 0; i < keg->uk_ppera; i++)
859 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
860
861 slab->us_keg = keg;
862 slab->us_data = mem;
863 slab->us_freecount = keg->uk_ipers;
864 slab->us_firstfree = 0;
865 slab->us_flags = flags;
866
867 if (keg->uk_flags & UMA_ZONE_REFCNT) {
868 slabref = (uma_slabrefcnt_t)slab;
869 for (i = 0; i < keg->uk_ipers; i++) {
870 slabref->us_freelist[i].us_refcnt = 0;
871 slabref->us_freelist[i].us_item = i+1;
872 }
873 } else {
874 for (i = 0; i < keg->uk_ipers; i++)
875 slab->us_freelist[i].us_item = i+1;
876 }
877
878 if (keg->uk_init != NULL) {
879 for (i = 0; i < keg->uk_ipers; i++)
880 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
881 keg->uk_size, wait) != 0)
882 break;
883 if (i != keg->uk_ipers) {
884 if (keg->uk_fini != NULL) {
885 for (i--; i > -1; i--)
886 keg->uk_fini(slab->us_data +
887 (keg->uk_rsize * i),
888 keg->uk_size);
889 }
890 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
891 vm_object_t obj;
892
893 if (flags & UMA_SLAB_KMEM)
894 obj = kmem_object;
895 else if (flags & UMA_SLAB_KERNEL)
896 obj = kernel_object;
897 else
898 obj = NULL;
899 for (i = 0; i < keg->uk_ppera; i++)
900 vsetobj((vm_offset_t)mem +
901 (i * PAGE_SIZE), obj);
902 }
903 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
904 zone_free_item(keg->uk_slabzone, slab,
905 NULL, SKIP_NONE, ZFREE_STATFREE);
906 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
907 flags);
908 KEG_LOCK(keg);
909 return (NULL);
910 }
911 }
912 KEG_LOCK(keg);
913
914 if (keg->uk_flags & UMA_ZONE_HASH)
915 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
916
917 keg->uk_pages += keg->uk_ppera;
918 keg->uk_free += keg->uk_ipers;
919
920 return (slab);
921}
922
923/*
924 * This function is intended to be used early on in place of page_alloc() so
925 * that we may use the boot time page cache to satisfy allocations before
926 * the VM is ready.
927 */
928static void *
929startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
930{
931 uma_keg_t keg;
932 uma_slab_t tmps;
933 int pages, check_pages;
934
935 keg = zone_first_keg(zone);
936 pages = howmany(bytes, PAGE_SIZE);
937 check_pages = pages - 1;
938 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
939
940 /*
941 * Check our small startup cache to see if it has pages remaining.
942 */
943 mtx_lock(&uma_boot_pages_mtx);
944
945 /* First check if we have enough room. */
946 tmps = LIST_FIRST(&uma_boot_pages);
947 while (tmps != NULL && check_pages-- > 0)
948 tmps = LIST_NEXT(tmps, us_link);
949 if (tmps != NULL) {
950 /*
951 * It's ok to lose tmps references. The last one will
952 * have tmps->us_data pointing to the start address of
953 * "pages" contiguous pages of memory.
954 */
955 while (pages-- > 0) {
956 tmps = LIST_FIRST(&uma_boot_pages);
957 LIST_REMOVE(tmps, us_link);
958 }
959 mtx_unlock(&uma_boot_pages_mtx);
960 *pflag = tmps->us_flags;
961 return (tmps->us_data);
962 }
963 mtx_unlock(&uma_boot_pages_mtx);
964 if (booted < UMA_STARTUP2)
965 panic("UMA: Increase vm.boot_pages");
966 /*
967 * Now that we've booted reset these users to their real allocator.
968 */
969#ifdef UMA_MD_SMALL_ALLOC
970 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
971#else
972 keg->uk_allocf = page_alloc;
973#endif
974 return keg->uk_allocf(zone, bytes, pflag, wait);
975}
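/*
 * Concrete walk-through of the boot-cache path above, with the usual
 * UMA_SLAB_SIZE == PAGE_SIZE: uma_startup() links the boot pages onto
 * uma_boot_pages head-first, so the list runs from the highest-addressed
 * page down to bootmem. A two-page request (pages = 2, check_pages = 1)
 * first confirms two entries exist, then pops two heads; the second slab
 * popped is the lower-addressed one, so its us_data is the start of two
 * consecutive boot pages and that is what gets returned.
 */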
976
977/*
978 * Allocates a number of pages from the system
979 *
980 * Arguments:
981 * bytes The number of bytes requested
982 * wait Shall we wait?
983 *
984 * Returns:
985 * A pointer to the alloced memory or possibly
986 * NULL if M_NOWAIT is set.
987 */
988static void *
989page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
990{
991 void *p; /* Returned page */
992
993 *pflag = UMA_SLAB_KMEM;
994 p = (void *) kmem_malloc(kmem_map, bytes, wait);
995
996 return (p);
997}
998
999/*
1000 * Allocates a number of pages from within an object
1001 *
1002 * Arguments:
1003 * bytes The number of bytes requested
1004 * wait Shall we wait?
1005 *
1006 * Returns:
1007 * A pointer to the alloced memory or possibly
1008 * NULL if M_NOWAIT is set.
1009 */
1010static void *
1011obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1012{
1013 vm_object_t object;
1014 vm_offset_t retkva, zkva;
1015 vm_page_t p;
1016 int pages, startpages;
1017 uma_keg_t keg;
1018
1019 keg = zone_first_keg(zone);
1020 object = keg->uk_obj;
1021 retkva = 0;
1022
1023 /*
1024 * This looks a little weird since we're getting one page at a time.
1025 */
1026 VM_OBJECT_LOCK(object);
1027 p = TAILQ_LAST(&object->memq, pglist);
1028 pages = p != NULL ? p->pindex + 1 : 0;
1029 startpages = pages;
1030 zkva = keg->uk_kva + pages * PAGE_SIZE;
1031 for (; bytes > 0; bytes -= PAGE_SIZE) {
1032 p = vm_page_alloc(object, pages,
1033 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
1034 if (p == NULL) {
1035 if (pages != startpages)
1036 pmap_qremove(retkva, pages - startpages);
1037 while (pages != startpages) {
1038 pages--;
1039 p = TAILQ_LAST(&object->memq, pglist);
1040 vm_page_unwire(p, 0);
1041 vm_page_free(p);
1042 }
1043 retkva = 0;
1044 goto done;
1045 }
1046 pmap_qenter(zkva, &p, 1);
1047 if (retkva == 0)
1048 retkva = zkva;
1049 zkva += PAGE_SIZE;
1050 pages += 1;
1051 }
1052done:
1053 VM_OBJECT_UNLOCK(object);
1054 *flags = UMA_SLAB_PRIV;
1055
1056 return ((void *)retkva);
1057}
1058
1059/*
1060 * Frees a number of pages to the system
1061 *
1062 * Arguments:
1063 * mem A pointer to the memory to be freed
1064 * size The size of the memory being freed
1065 * flags The original p->us_flags field
1066 *
1067 * Returns:
1068 * Nothing
1069 */
1070static void
1071page_free(void *mem, int size, u_int8_t flags)
1072{
1073 vm_map_t map;
1074
1075 if (flags & UMA_SLAB_KMEM)
1076 map = kmem_map;
1077 else if (flags & UMA_SLAB_KERNEL)
1078 map = kernel_map;
1079 else
1080 panic("UMA: page_free used with invalid flags %d", flags);
1081
1082 kmem_free(map, (vm_offset_t)mem, size);
1083}
1084
1085/*
1086 * Zero fill initializer
1087 *
1088 * Arguments/Returns follow uma_init specifications
1089 */
1090static int
1091zero_init(void *mem, int size, int flags)
1092{
1093 bzero(mem, size);
1094 return (0);
1095}
1096
1097/*
1098 * Finish creating a small uma keg. This calculates ipers, and the keg size.
1099 *
1100 * Arguments
1101 * keg The zone we should initialize
1102 *
1103 * Returns
1104 * Nothing
1105 */
1106static void
1107keg_small_init(uma_keg_t keg)
1108{
1109 u_int rsize;
1110 u_int memused;
1111 u_int wastedspace;
1112 u_int shsize;
1113
1114 KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
1115 rsize = keg->uk_size;
1116
1117 if (rsize < UMA_SMALLEST_UNIT)
1118 rsize = UMA_SMALLEST_UNIT;
1119 if (rsize & keg->uk_align)
1120 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1121
1122 keg->uk_rsize = rsize;
1123 keg->uk_ppera = 1;
1124
1125 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1126 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
1127 shsize = sizeof(struct uma_slab_refcnt);
1128 } else {
1129 rsize += UMA_FRITM_SZ; /* Account for linkage */
1130 shsize = sizeof(struct uma_slab);
1131 }
1132
1133 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1134 KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
1135 memused = keg->uk_ipers * rsize + shsize;
1136 wastedspace = UMA_SLAB_SIZE - memused;
1137
1138 /*
1139 * We can't do OFFPAGE if we're internal or if we've been
1140 * asked to not go to the VM for buckets. If we do this we
1141 * may end up going to the VM (kmem_map) for slabs which we
1142 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1143 * result of UMA_ZONE_VM, which clearly forbids it.
1144 */
1145 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1146 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1147 return;
1148
1149 if ((wastedspace >= UMA_MAX_WASTE) &&
1150 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1151 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1152 KASSERT(keg->uk_ipers <= 255,
1153 ("keg_small_init: keg->uk_ipers too high!"));
1154#ifdef UMA_DEBUG
1155 printf("UMA decided we need offpage slab headers for "
1156 "keg: %s, calculated wastedspace = %d, "
1157 "maximum wasted space allowed = %d, "
1158 "calculated ipers = %d, "
1159 "new wasted space = %d\n", keg->uk_name, wastedspace,
1160 UMA_MAX_WASTE, keg->uk_ipers,
1161 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1162#endif
1163 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1164 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1165 keg->uk_flags |= UMA_ZONE_HASH;
1166 }
1167}
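/*
 * Illustrative arithmetic for the sizing above, assuming for the example a
 * 4096-byte UMA_SLAB_SIZE, a 256-byte item with its alignment already
 * satisfied, no REFCNT, a 64-byte slab header (shsize) and UMA_FRITM_SZ of
 * one byte: rsize becomes 257, so uk_ipers = (4096 - 64) / 257 = 15 and
 * wastedspace = 4096 - (15 * 257 + 64) = 177. If that exceeded
 * UMA_MAX_WASTE, the second test would see that 4096 / uk_rsize = 16 > 15
 * items fit once the header moves off-page, and the keg would switch to
 * UMA_ZONE_OFFPAGE with uk_ipers = 16.
 */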
1168
1169/*
1170 * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1171 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1172 * more complicated.
1173 *
1174 * Arguments
1175 * keg The keg we should initialize
1176 *
1177 * Returns
1178 * Nothing
1179 */
1180static void
1181keg_large_init(uma_keg_t keg)
1182{
1183 int pages;
1184
1185 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1186 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1187 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1188
1189 pages = keg->uk_size / UMA_SLAB_SIZE;
1190
1191 /* Account for remainder */
1192 if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1193 pages++;
1194
1195 keg->uk_ppera = pages;
1196 keg->uk_ipers = 1;
1197 keg->uk_rsize = keg->uk_size;
1198
1199 /* We can't do OFFPAGE if we're internal, bail out here. */
1200 if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1201 return;
1202
1203 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1204 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1205 keg->uk_flags |= UMA_ZONE_HASH;
1206}
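/*
 * Example with a 4096-byte UMA_SLAB_SIZE: a 9000-byte item gives pages = 2
 * from the division, the remainder check bumps it to 3, and the keg ends up
 * with uk_ppera = 3, uk_ipers = 1 and uk_rsize = 9000, plus OFFPAGE (and
 * HASH when VTOSLAB is not set) unless it is an internal keg.
 */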
1207
1208static void
1209keg_cachespread_init(uma_keg_t keg)
1210{
1211 int alignsize;
1212 int trailer;
1213 int pages;
1214 int rsize;
1215
1216 alignsize = keg->uk_align + 1;
1217 rsize = keg->uk_size;
1218 /*
1219 * We want one item to start on every align boundary in a page. To
1220 * do this we will span pages. We will also extend the item by the
1221 * size of align if it is an even multiple of align. Otherwise, it
1222 * would fall on the same boundary every time.
1223 */
1224 if (rsize & keg->uk_align)
1225 rsize = (rsize & ~keg->uk_align) + alignsize;
1226 if ((rsize & alignsize) == 0)
1227 rsize += alignsize;
1228 trailer = rsize - keg->uk_size;
1229 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1230 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1231 keg->uk_rsize = rsize;
1232 keg->uk_ppera = pages;
1233 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1234 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1235 KASSERT(keg->uk_ipers <= uma_max_ipers,
1236 ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
1237 keg->uk_ipers));
1238}
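/*
 * Example of the spreading above, assuming 4096-byte pages and 64-byte
 * cache lines (uk_align = 63): a 128-byte item is already aligned but is an
 * even multiple of the line size, so rsize grows to 192 (trailer = 64),
 * pages = (192 * 64) / 4096 = 3 and uk_ipers = (3 * 4096 + 64) / 192 = 64,
 * so successive items land on different cache-line offsets as allocation
 * walks through the three-page slab.
 */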
1239
1240/*
1241 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1242 * the keg onto the global keg list.
1243 *
1244 * Arguments/Returns follow uma_ctor specifications
1245 * udata Actually uma_kctor_args
1246 */
1247static int
1248keg_ctor(void *mem, int size, void *udata, int flags)
1249{
1250 struct uma_kctor_args *arg = udata;
1251 uma_keg_t keg = mem;
1252 uma_zone_t zone;
1253
1254 bzero(keg, size);
1255 keg->uk_size = arg->size;
1256 keg->uk_init = arg->uminit;
1257 keg->uk_fini = arg->fini;
1258 keg->uk_align = arg->align;
1259 keg->uk_free = 0;
1260 keg->uk_pages = 0;
1261 keg->uk_flags = arg->flags;
1262 keg->uk_allocf = page_alloc;
1263 keg->uk_freef = page_free;
1264 keg->uk_recurse = 0;
1265 keg->uk_slabzone = NULL;
1266
1267 /*
1268 * The master zone is passed to us at keg-creation time.
1269 */
1270 zone = arg->zone;
1271 keg->uk_name = zone->uz_name;
1272
1273 if (arg->flags & UMA_ZONE_VM)
1274 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1275
1276 if (arg->flags & UMA_ZONE_ZINIT)
1277 keg->uk_init = zero_init;
1278
1279 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1280 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1281
1282 /*
1283 * The +UMA_FRITM_SZ added to uk_size is to account for the
1284 * linkage that is added to the size in keg_small_init(). If
1285 * we don't account for this here then we may end up in
1286 * keg_small_init() with a calculated 'ipers' of 0.
1287 */
1288 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1289 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1290 keg_cachespread_init(keg);
1291 else if ((keg->uk_size+UMA_FRITMREF_SZ) >
1292 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1293 keg_large_init(keg);
1294 else
1295 keg_small_init(keg);
1296 } else {
1297 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1298 keg_cachespread_init(keg);
1299 else if ((keg->uk_size+UMA_FRITM_SZ) >
1300 (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1301 keg_large_init(keg);
1302 else
1303 keg_small_init(keg);
1304 }
1305
1306 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1307 if (keg->uk_flags & UMA_ZONE_REFCNT)
1308 keg->uk_slabzone = slabrefzone;
1309 else
1310 keg->uk_slabzone = slabzone;
1311 }
1312
1313 /*
1314 * If we haven't booted yet we need allocations to go through the
1315 * startup cache until the vm is ready.
1316 */
1317 if (keg->uk_ppera == 1) {
1318#ifdef UMA_MD_SMALL_ALLOC
1319 keg->uk_allocf = uma_small_alloc;
1320 keg->uk_freef = uma_small_free;
1321
1322 if (booted < UMA_STARTUP)
1323 keg->uk_allocf = startup_alloc;
1324#else
1325 if (booted < UMA_STARTUP2)
1326 keg->uk_allocf = startup_alloc;
1327#endif
1328 } else if (booted < UMA_STARTUP2 &&
1329 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1330 keg->uk_allocf = startup_alloc;
1331
1332 /*
1333 * Initialize keg's lock (shared among zones).
1334 */
1335 if (arg->flags & UMA_ZONE_MTXCLASS)
1336 KEG_LOCK_INIT(keg, 1);
1337 else
1338 KEG_LOCK_INIT(keg, 0);
1339
1340 /*
1341 * If we're putting the slab header in the actual page we need to
1342 * figure out where in each page it goes. This calculates a right
1343 * justified offset into the memory on an ALIGN_PTR boundary.
1344 */
1345 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1346 u_int totsize;
1347
1348 /* Size of the slab struct and free list */
1349 if (keg->uk_flags & UMA_ZONE_REFCNT)
1350 totsize = sizeof(struct uma_slab_refcnt) +
1351 keg->uk_ipers * UMA_FRITMREF_SZ;
1352 else
1353 totsize = sizeof(struct uma_slab) +
1354 keg->uk_ipers * UMA_FRITM_SZ;
1355
1356 if (totsize & UMA_ALIGN_PTR)
1357 totsize = (totsize & ~UMA_ALIGN_PTR) +
1358 (UMA_ALIGN_PTR + 1);
1359 keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
1360
1361 if (keg->uk_flags & UMA_ZONE_REFCNT)
1362 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1363 + keg->uk_ipers * UMA_FRITMREF_SZ;
1364 else
1365 totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1366 + keg->uk_ipers * UMA_FRITM_SZ;
1367
1368 /*
1369 * The only way the following is possible is if with our
1370 * UMA_ALIGN_PTR adjustments we are now bigger than
1371 * UMA_SLAB_SIZE. I haven't checked whether this is
1372 * mathematically possible for all cases, so we make
1373 * sure here anyway.
1374 */
1375 if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
1376 printf("zone %s ipers %d rsize %d size %d\n",
1377 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1378 keg->uk_size);
1379 panic("UMA slab won't fit.");
1380 }
1381 }
1382
1383 if (keg->uk_flags & UMA_ZONE_HASH)
1384 hash_alloc(&keg->uk_hash);
1385
1386#ifdef UMA_DEBUG
1387 printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
1388 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1389 keg->uk_ipers, keg->uk_ppera,
1390 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1391#endif
1392
1393 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1394
1395 mtx_lock(&uma_mtx);
1396 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1397 mtx_unlock(&uma_mtx);
1398 return (0);
1399}
1400
1401/*
1402 * Zone header ctor. This initializes all fields, locks, etc.
1403 *
1404 * Arguments/Returns follow uma_ctor specifications
1405 * udata Actually uma_zctor_args
1406 */
1407static int
1408zone_ctor(void *mem, int size, void *udata, int flags)
1409{
1410 struct uma_zctor_args *arg = udata;
1411 uma_zone_t zone = mem;
1412 uma_zone_t z;
1413 uma_keg_t keg;
1414
1415 bzero(zone, size);
1416 zone->uz_name = arg->name;
1417 zone->uz_ctor = arg->ctor;
1418 zone->uz_dtor = arg->dtor;
1419 zone->uz_slab = zone_fetch_slab;
1420 zone->uz_init = NULL;
1421 zone->uz_fini = NULL;
1422 zone->uz_allocs = 0;
1423 zone->uz_frees = 0;
1424 zone->uz_fails = 0;
1425 zone->uz_sleeps = 0;
1426 zone->uz_fills = zone->uz_count = 0;
1427 zone->uz_flags = 0;
1428 keg = arg->keg;
1429
1430 if (arg->flags & UMA_ZONE_SECONDARY) {
1431 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1432 zone->uz_init = arg->uminit;
1433 zone->uz_fini = arg->fini;
1434 zone->uz_lock = &keg->uk_lock;
1435 zone->uz_flags |= UMA_ZONE_SECONDARY;
1436 mtx_lock(&uma_mtx);
1437 ZONE_LOCK(zone);
1438 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1439 if (LIST_NEXT(z, uz_link) == NULL) {
1440 LIST_INSERT_AFTER(z, zone, uz_link);
1441 break;
1442 }
1443 }
1444 ZONE_UNLOCK(zone);
1445 mtx_unlock(&uma_mtx);
1446 } else if (keg == NULL) {
1447 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1448 arg->align, arg->flags)) == NULL)
1449 return (ENOMEM);
1450 } else {
1451 struct uma_kctor_args karg;
1452 int error;
1453
1454 /* We should only be here from uma_startup() */
1455 karg.size = arg->size;
1456 karg.uminit = arg->uminit;
1457 karg.fini = arg->fini;
1458 karg.align = arg->align;
1459 karg.flags = arg->flags;
1460 karg.zone = zone;
1461 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1462 flags);
1463 if (error)
1464 return (error);
1465 }
1466 /*
1467 * Link in the first keg.
1468 */
1469 zone->uz_klink.kl_keg = keg;
1470 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1471 zone->uz_lock = &keg->uk_lock;
1472 zone->uz_size = keg->uk_size;
1473 zone->uz_flags |= (keg->uk_flags &
1474 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1475
1476 /*
1477 * Some internal zones don't have room allocated for the per cpu
1478 * caches. If we're internal, bail out here.
1479 */
1480 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1481 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1482 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1483 return (0);
1484 }
1485
1486 if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1487 zone->uz_count = BUCKET_MAX;
1488 else if (keg->uk_ipers <= BUCKET_MAX)
1489 zone->uz_count = keg->uk_ipers;
1490 else
1491 zone->uz_count = BUCKET_MAX;
1492 return (0);
1493}
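/*
 * The trailing uz_count selection above simply seeds the per-cpu bucket
 * size from the keg: a keg holding 15 items per slab starts with
 * uz_count = 15, one holding 300 is clamped to BUCKET_MAX (128), and
 * UMA_ZONE_MAXBUCKET forces the maximum outright.
 */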
1494
1495/*
1496 * Keg header dtor. This frees all data, destroys locks, frees the hash
1497 * table and removes the keg from the global list.
1498 *
1499 * Arguments/Returns follow uma_dtor specifications
1500 * udata unused
1501 */
1502static void
1503keg_dtor(void *arg, int size, void *udata)
1504{
1505 uma_keg_t keg;
1506
1507 keg = (uma_keg_t)arg;
1508 KEG_LOCK(keg);
1509 if (keg->uk_free != 0) {
1510 printf("Freed UMA keg was not empty (%d items). "
1511 " Lost %d pages of memory.\n",
1512 keg->uk_free, keg->uk_pages);
1513 }
1514 KEG_UNLOCK(keg);
1515
1516 hash_free(&keg->uk_hash);
1517
1518 KEG_LOCK_FINI(keg);
1519}
1520
1521/*
1522 * Zone header dtor.
1523 *
1524 * Arguments/Returns follow uma_dtor specifications
1525 * udata unused
1526 */
1527static void
1528zone_dtor(void *arg, int size, void *udata)
1529{
1530 uma_klink_t klink;
1531 uma_zone_t zone;
1532 uma_keg_t keg;
1533
1534 zone = (uma_zone_t)arg;
1535 keg = zone_first_keg(zone);
1536
1537 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1538 cache_drain(zone);
1539
1540 mtx_lock(&uma_mtx);
1541 LIST_REMOVE(zone, uz_link);
1542 mtx_unlock(&uma_mtx);
1543 /*
1544 * XXX there are some races here where
1545 * the zone can be drained but zone lock
1546 * released and then refilled before we
1547 * remove it... we don't care for now
1548 */
1549 zone_drain_wait(zone, M_WAITOK);
1550 /*
1551 * Unlink all of our kegs.
1552 */
1553 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1554 klink->kl_keg = NULL;
1555 LIST_REMOVE(klink, kl_link);
1556 if (klink == &zone->uz_klink)
1557 continue;
1558 free(klink, M_TEMP);
1559 }
1560 /*
1561 * We only destroy kegs from non secondary zones.
1562 */
1563 if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1564 mtx_lock(&uma_mtx);
1565 LIST_REMOVE(keg, uk_link);
1566 mtx_unlock(&uma_mtx);
1567 zone_free_item(kegs, keg, NULL, SKIP_NONE,
1568 ZFREE_STATFREE);
1569 }
1570}
1571
1572/*
1573 * Traverses every zone in the system and calls a callback
1574 *
1575 * Arguments:
1576 * zfunc A pointer to a function which accepts a zone
1577 * as an argument.
1578 *
1579 * Returns:
1580 * Nothing
1581 */
1582static void
1583zone_foreach(void (*zfunc)(uma_zone_t))
1584{
1585 uma_keg_t keg;
1586 uma_zone_t zone;
1587
1588 mtx_lock(&uma_mtx);
1589 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1590 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1591 zfunc(zone);
1592 }
1593 mtx_unlock(&uma_mtx);
1594}
1595
1596/* Public functions */
1597/* See uma.h */
1598void
1599uma_startup(void *bootmem, int boot_pages)
1600{
1601 struct uma_zctor_args args;
1602 uma_slab_t slab;
1603 u_int slabsize;
1604 u_int objsize, totsize, wsize;
1605 int i;
1606
1607#ifdef UMA_DEBUG
1608 printf("Creating uma keg headers zone and keg.\n");
1609#endif
1610 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1611
1612 /*
1613 * Figure out the maximum number of items-per-slab we'll have if
1614 * we're using the OFFPAGE slab header to track free items, given
1615 * all possible object sizes and the maximum desired wastage
1616 * (UMA_MAX_WASTE).
1617 *
1618 * We iterate until we find an object size for
1619 * which the calculated wastage in keg_small_init() will be
1620 * enough to warrant OFFPAGE. Since wastedspace versus objsize
1621 * is an overall increasing see-saw function, we find the smallest
1622 * objsize such that the wastage is always acceptable for objects
1623 * with that objsize or smaller. Since a smaller objsize always
1624 * generates a larger possible uma_max_ipers, we use this computed
1625 * objsize to calculate the largest ipers possible. Since the
1626 * ipers calculated for OFFPAGE slab headers is always larger than
1627 * the ipers initially calculated in keg_small_init(), we use
1628 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1629 * obtain the maximum ipers possible for offpage slab headers.
1630 *
1631 * It should be noted that ipers versus objsize is an inversely
1632 * proportional function which drops off rather quickly so as
1633 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1634 * falls into the portion of the inverse relation AFTER the steep
1635 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1636 *
1637 * Note that we have 8-bits (1 byte) to use as a freelist index
1638 * inside the actual slab header itself and this is enough to
1639 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
1640 * object with offpage slab header would have ipers =
1641 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1642 * 1 greater than what our byte-integer freelist index can
1643 * accommodate, but we know that this situation never occurs as
1644 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1645 * that we need to go to offpage slab headers. Or, if we do,
1646 * then we trap that condition below and panic in the INVARIANTS case.
1647 */
1648 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1649 totsize = wsize;
1650 objsize = UMA_SMALLEST_UNIT;
1651 while (totsize >= wsize) {
1652 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1653 (objsize + UMA_FRITM_SZ);
1654 totsize *= (UMA_FRITM_SZ + objsize);
1655 objsize++;
1656 }
1657 if (objsize > UMA_SMALLEST_UNIT)
1658 objsize--;
1659 uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
1660
1661 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1662 totsize = wsize;
1663 objsize = UMA_SMALLEST_UNIT;
1664 while (totsize >= wsize) {
1665 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1666 (objsize + UMA_FRITMREF_SZ);
1667 totsize *= (UMA_FRITMREF_SZ + objsize);
1668 objsize++;
1669 }
1670 if (objsize > UMA_SMALLEST_UNIT)
1671 objsize--;
1672 uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
1673
1674 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1675 ("uma_startup: calculated uma_max_ipers values too large!"));
1676
1677#ifdef UMA_DEBUG
1678 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1679 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1680 uma_max_ipers_ref);
1681#endif
1682
1683 /* "manually" create the initial zone */
1684 args.name = "UMA Kegs";
1685 args.size = sizeof(struct uma_keg);
1686 args.ctor = keg_ctor;
1687 args.dtor = keg_dtor;
1688 args.uminit = zero_init;
1689 args.fini = NULL;
1690 args.keg = &masterkeg;
1691 args.align = 32 - 1;
1692 args.flags = UMA_ZFLAG_INTERNAL;
1693 /* The initial zone has no Per cpu queues so it's smaller */
1694 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1695
1696#ifdef UMA_DEBUG
1697 printf("Filling boot free list.\n");
1698#endif
1699 for (i = 0; i < boot_pages; i++) {
1700 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1701 slab->us_data = (u_int8_t *)slab;
1702 slab->us_flags = UMA_SLAB_BOOT;
1703 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1704 }
1705 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1706
1707#ifdef UMA_DEBUG
1708 printf("Creating uma zone headers zone and keg.\n");
1709#endif
1710 args.name = "UMA Zones";
1711 args.size = sizeof(struct uma_zone) +
1712 (sizeof(struct uma_cache) * (mp_maxid + 1));
1713 args.ctor = zone_ctor;
1714 args.dtor = zone_dtor;
1715 args.uminit = zero_init;
1716 args.fini = NULL;
1717 args.keg = NULL;
1718 args.align = 32 - 1;
1719 args.flags = UMA_ZFLAG_INTERNAL;
1720 /* The initial zone has no Per cpu queues so it's smaller */
1721 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1722
1723#ifdef UMA_DEBUG
1724 printf("Initializing pcpu cache locks.\n");
1725#endif
1726#ifdef UMA_DEBUG
1727 printf("Creating slab and hash zones.\n");
1728#endif
1729
1730 /*
1731 * This is the max number of free list items we'll have with
1732 * offpage slabs.
1733 */
1734 slabsize = uma_max_ipers * UMA_FRITM_SZ;
1735 slabsize += sizeof(struct uma_slab);
1736
1737 /* Now make a zone for slab headers */
1738 slabzone = uma_zcreate("UMA Slabs",
1739 slabsize,
1740 NULL, NULL, NULL, NULL,
1741 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1742
1743 /*
1744 * We also create a zone for the bigger slabs with reference
1745 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1746 */
1747 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1748 slabsize += sizeof(struct uma_slab_refcnt);
1749 slabrefzone = uma_zcreate("UMA RCntSlabs",
1750 slabsize,
1751 NULL, NULL, NULL, NULL,
1752 UMA_ALIGN_PTR,
1753 UMA_ZFLAG_INTERNAL);
1754
1755 hashzone = uma_zcreate("UMA Hash",
1756 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1757 NULL, NULL, NULL, NULL,
1758 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1759
1760 bucket_init();
1761
1762 booted = UMA_STARTUP;
1763
1764#ifdef UMA_DEBUG
1765 printf("UMA startup complete.\n");
1766#endif
1767}
1768
1769/* see uma.h */
1770void
1771uma_startup2(void)
1772{
1773 booted = UMA_STARTUP2;
1774 bucket_enable();
1775#ifdef UMA_DEBUG
1776 printf("UMA startup2 complete.\n");
1777#endif
1778}
1779
1780/*
1781 * Initialize our callout handle
1782 *
1783 */
1784
1785static void
1786uma_startup3(void)
1787{
1788#ifdef UMA_DEBUG
1789 printf("Starting callout.\n");
1790#endif
1791 callout_init(&uma_callout, CALLOUT_MPSAFE);
1792 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1793#ifdef UMA_DEBUG
1794 printf("UMA startup3 complete.\n");
1795#endif
1796}
1797
1798static uma_keg_t
1799uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1800 int align, u_int32_t flags)
1801{
1802 struct uma_kctor_args args;
1803
1804 args.size = size;
1805 args.uminit = uminit;
1806 args.fini = fini;
1807 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1808 args.flags = flags;
1809 args.zone = zone;
1810 return (zone_alloc_item(kegs, &args, M_WAITOK));
1811}
1812
1813/* See uma.h */
1814void
1815uma_set_align(int align)
1816{
1817
1818 if (align != UMA_ALIGN_CACHE)
1819 uma_align_cache = align;
1820}
1821
1822/* See uma.h */
1823uma_zone_t
1824uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1825 uma_init uminit, uma_fini fini, int align, u_int32_t flags)
1826
1827{
1828 struct uma_zctor_args args;
1829
1830 /* This stuff is essential for the zone ctor */
1831 args.name = name;
1832 args.size = size;
1833 args.ctor = ctor;
1834 args.dtor = dtor;
1835 args.uminit = uminit;
1836 args.fini = fini;
1837 args.align = align;
1838 args.flags = flags;
1839 args.keg = NULL;
1840
1841 return (zone_alloc_item(zones, &args, M_WAITOK));
1842}
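/*
 * Sketch of a caller supplying per-allocation callbacks; the prototypes
 * mirror the uma_ctor/uma_dtor shapes used by keg_ctor()/keg_dtor() above,
 * and the foo names are hypothetical:
 *
 *	static int
 *	foo_ctor(void *mem, int size, void *arg, int flags)
 *	{
 *		struct foo *fp = mem;
 *
 *		fp->foo_refcount = 1;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_dtor(void *mem, int size, void *arg)
 *	{
 *		KASSERT(((struct foo *)mem)->foo_refcount == 0,
 *		    ("foo_dtor: freeing referenced foo"));
 *	}
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, foo_dtor,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 * The ctor/dtor pair runs on every uma_zalloc/uma_zfree, while the
 * uminit/fini slots passed as NULL here run only when items are created on
 * or released from a slab (see keg_alloc_slab() and keg_drain()).
 */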
1843
1844/* See uma.h */
1845uma_zone_t
1846uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1847 uma_init zinit, uma_fini zfini, uma_zone_t master)
1848{
1849 struct uma_zctor_args args;
1850 uma_keg_t keg;
1851
1852 keg = zone_first_keg(master);
1853 args.name = name;
1854 args.size = keg->uk_size;
1855 args.ctor = ctor;
1856 args.dtor = dtor;
1857 args.uminit = zinit;
1858 args.fini = zfini;
1859 args.align = keg->uk_align;
1860 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1861 args.keg = keg;
1862
1863 /* XXX Attaches only one keg of potentially many. */
1864 return (zone_alloc_item(zones, &args, M_WAITOK));
1865}
1866
1867static void
1868zone_lock_pair(uma_zone_t a, uma_zone_t b)
1869{
1870 if (a < b) {
1871 ZONE_LOCK(a);
1872 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1873 } else {
1874 ZONE_LOCK(b);
1875 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1876 }
1877}
1878
1879static void
1880zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1881{
1882
1883 ZONE_UNLOCK(a);
1884 ZONE_UNLOCK(b);
1885}
1886
1887int
1888uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1889{
1890 uma_klink_t klink;
1891 uma_klink_t kl;
1892 int error;
1893
1894 error = 0;
1895 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1896
1897 zone_lock_pair(zone, master);
1898 /*
1899 * zone must use vtoslab() to resolve objects and must already be
1900 * a secondary.
1901 */
1902 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1903 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1904 error = EINVAL;
1905 goto out;
1906 }
1907 /*
1908 * The new master must also use vtoslab().
1909 */
 1910	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1911 error = EINVAL;
1912 goto out;
1913 }
1914 /*
1915 * Both must either be refcnt, or not be refcnt.
1916 */
1917 if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
1918 (master->uz_flags & UMA_ZONE_REFCNT)) {
1919 error = EINVAL;
1920 goto out;
1921 }
1922 /*
1923 * The underlying object must be the same size. rsize
1924 * may be different.
1925 */
1926 if (master->uz_size != zone->uz_size) {
1927 error = E2BIG;
1928 goto out;
1929 }
1930 /*
1931 * Put it at the end of the list.
1932 */
1933 klink->kl_keg = zone_first_keg(master);
1934 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1935 if (LIST_NEXT(kl, kl_link) == NULL) {
1936 LIST_INSERT_AFTER(kl, klink, kl_link);
1937 break;
1938 }
1939 }
1940 klink = NULL;
1941 zone->uz_flags |= UMA_ZFLAG_MULTI;
1942 zone->uz_slab = zone_fetch_slab_multi;
1943
1944out:
1945 zone_unlock_pair(zone, master);
1946 if (klink != NULL)
1947 free(klink, M_TEMP);
1948
1949 return (error);
1950}
1951
1952
1953/* See uma.h */
1954void
1955uma_zdestroy(uma_zone_t zone)
1956{
1957
1958 zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1959}
1960
1961/* See uma.h */
1962void *
1963uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1964{
1965 void *item;
1966 uma_cache_t cache;
1967 uma_bucket_t bucket;
1968 int cpu;
1969
1970 /* This is the fast path allocation */
1971#ifdef UMA_DEBUG_ALLOC_1
1972 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1973#endif
1974 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1975 zone->uz_name, flags);
1976
1977 if (flags & M_WAITOK) {
1978 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1979 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
1980 }
96/*
97 * This is the zone and keg from which all zones are spawned. The idea is that
98 * even the zone & keg heads are allocated from the allocator, so we use the
99 * bss section to bootstrap us.
100 */
101static struct uma_keg masterkeg;
102static struct uma_zone masterzone_k;
103static struct uma_zone masterzone_z;
104static uma_zone_t kegs = &masterzone_k;
105static uma_zone_t zones = &masterzone_z;
106
107/* This is the zone from which all of uma_slab_t's are allocated. */
108static uma_zone_t slabzone;
109static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
110
111/*
112 * The initial hash tables come out of this zone so they can be allocated
113 * prior to malloc coming up.
114 */
115static uma_zone_t hashzone;
116
117/* The boot-time adjusted value for cache line alignment. */
118int uma_align_cache = 64 - 1;
119
120static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
121
122/*
123 * Are we allowed to allocate buckets?
124 */
125static int bucketdisable = 1;
126
127/* Linked list of all kegs in the system */
128static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
129
130/* This mutex protects the keg list */
131static struct mtx uma_mtx;
132
133/* Linked list of boot time pages */
134static LIST_HEAD(,uma_slab) uma_boot_pages =
135 LIST_HEAD_INITIALIZER(uma_boot_pages);
136
137/* This mutex protects the boot time pages list */
138static struct mtx uma_boot_pages_mtx;
139
140/* Is the VM done starting up? */
141static int booted = 0;
142#define UMA_STARTUP 1
143#define UMA_STARTUP2 2
144
145/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
146static u_int uma_max_ipers;
147static u_int uma_max_ipers_ref;
148
149/*
150 * This is the handle used to schedule events that need to happen
151 * outside of the allocation fast path.
152 */
153static struct callout uma_callout;
154#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
155
156/*
157 * This structure is passed as the zone ctor arg so that I don't have to create
158 * a special allocation function just for zones.
159 */
160struct uma_zctor_args {
161 char *name;
162 size_t size;
163 uma_ctor ctor;
164 uma_dtor dtor;
165 uma_init uminit;
166 uma_fini fini;
167 uma_keg_t keg;
168 int align;
169 u_int32_t flags;
170};
171
172struct uma_kctor_args {
173 uma_zone_t zone;
174 size_t size;
175 uma_init uminit;
176 uma_fini fini;
177 int align;
178 u_int32_t flags;
179};
180
181struct uma_bucket_zone {
182 uma_zone_t ubz_zone;
183 char *ubz_name;
184 int ubz_entries;
185};
186
187#define BUCKET_MAX 128
188
189struct uma_bucket_zone bucket_zones[] = {
190 { NULL, "16 Bucket", 16 },
191 { NULL, "32 Bucket", 32 },
192 { NULL, "64 Bucket", 64 },
193 { NULL, "128 Bucket", 128 },
194 { NULL, NULL, 0}
195};
196
197#define BUCKET_SHIFT 4
198#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
199
200/*
201 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
202 * of approximately the right size.
203 */
204static uint8_t bucket_size[BUCKET_ZONES];
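/*
 * Illustration only: with BUCKET_MAX 128 and BUCKET_SHIFT 4 above this is a
 * 9-entry table ((128 >> 4) + 1), one slot per 16-entry step plus the zero
 * slot, filled in by bucket_init() below.
 */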
205
206/*
207 * Flags and enumerations to be passed to internal functions.
208 */
209enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
210
211#define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
212#define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
213
214/* Prototypes.. */
215
216static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
217static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
218static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
219static void page_free(void *, int, u_int8_t);
220static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
221static void cache_drain(uma_zone_t);
222static void bucket_drain(uma_zone_t, uma_bucket_t);
223static void bucket_cache_drain(uma_zone_t zone);
224static int keg_ctor(void *, int, void *, int);
225static void keg_dtor(void *, int, void *);
226static int zone_ctor(void *, int, void *, int);
227static void zone_dtor(void *, int, void *);
228static int zero_init(void *, int, int);
229static void keg_small_init(uma_keg_t keg);
230static void keg_large_init(uma_keg_t keg);
231static void zone_foreach(void (*zfunc)(uma_zone_t));
232static void zone_timeout(uma_zone_t zone);
233static int hash_alloc(struct uma_hash *);
234static int hash_expand(struct uma_hash *, struct uma_hash *);
235static void hash_free(struct uma_hash *hash);
236static void uma_timeout(void *);
237static void uma_startup3(void);
238static void *zone_alloc_item(uma_zone_t, void *, int);
239static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
240 int);
241static void bucket_enable(void);
242static void bucket_init(void);
243static uma_bucket_t bucket_alloc(int, int);
244static void bucket_free(uma_bucket_t);
245static void bucket_zone_drain(void);
246static int zone_alloc_bucket(uma_zone_t zone, int flags);
247static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
248static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
249static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
250static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
251 uma_fini fini, int align, u_int32_t flags);
252static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
253static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
254
255void uma_print_zone(uma_zone_t);
256void uma_print_stats(void);
257static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
258static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
259
260SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
261
262SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
263 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
264
265SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
266 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
267
268/*
269 * This routine checks to see whether or not it's safe to enable buckets.
270 */
271
272static void
273bucket_enable(void)
274{
275 if (cnt.v_free_count < cnt.v_free_min)
276 bucketdisable = 1;
277 else
278 bucketdisable = 0;
279}
280
281/*
282 * Initialize bucket_zones, the array of zones of buckets of various sizes.
283 *
284 * For each zone, calculate the memory required for each bucket, consisting
 285 * of the header and an array of pointers, and point the corresponding
 286 * range of bucket_size[] entries at the zone.
287 */
288static void
289bucket_init(void)
290{
291 struct uma_bucket_zone *ubz;
292 int i;
293 int j;
294
295 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
296 int size;
297
298 ubz = &bucket_zones[j];
299 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
300 size += sizeof(void *) * ubz->ubz_entries;
301 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
302 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
303 UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
304 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
305 bucket_size[i >> BUCKET_SHIFT] = j;
306 }
307}
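/*
 * Illustration (derived from the loop above, not additional behavior):
 * with BUCKET_SHIFT 4 the bucket_zones[] table fills bucket_size[] as
 *
 *	bucket_size[0..1] = 0	"16 Bucket"	(requests of 1..16 entries)
 *	bucket_size[2]    = 1	"32 Bucket"	(17..32)
 *	bucket_size[3..4] = 2	"64 Bucket"	(33..64)
 *	bucket_size[5..8] = 3	"128 Bucket"	(65..128)
 *
 * so bucket_zone_lookup(20), for example, computes howmany(20, 16) == 2
 * and hands back the "32 Bucket" zone.
 */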
308
309/*
310 * Given a desired number of entries for a bucket, return the zone from which
311 * to allocate the bucket.
312 */
313static struct uma_bucket_zone *
314bucket_zone_lookup(int entries)
315{
316 int idx;
317
318 idx = howmany(entries, 1 << BUCKET_SHIFT);
319 return (&bucket_zones[bucket_size[idx]]);
320}
321
322static uma_bucket_t
323bucket_alloc(int entries, int bflags)
324{
325 struct uma_bucket_zone *ubz;
326 uma_bucket_t bucket;
327
328 /*
329 * This is to stop us from allocating per cpu buckets while we're
 330 * still allocating out of vm.boot_pages. Otherwise, we would exhaust the
331 * boot pages. This also prevents us from allocating buckets in
332 * low memory situations.
333 */
334 if (bucketdisable)
335 return (NULL);
336
337 ubz = bucket_zone_lookup(entries);
338 bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
339 if (bucket) {
340#ifdef INVARIANTS
341 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
342#endif
343 bucket->ub_cnt = 0;
344 bucket->ub_entries = ubz->ubz_entries;
345 }
346
347 return (bucket);
348}
349
350static void
351bucket_free(uma_bucket_t bucket)
352{
353 struct uma_bucket_zone *ubz;
354
355 ubz = bucket_zone_lookup(bucket->ub_entries);
356 zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
357 ZFREE_STATFREE);
358}
359
360static void
361bucket_zone_drain(void)
362{
363 struct uma_bucket_zone *ubz;
364
365 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
366 zone_drain(ubz->ubz_zone);
367}
368
369static inline uma_keg_t
370zone_first_keg(uma_zone_t zone)
371{
372
373 return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
374}
375
376static void
377zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
378{
379 uma_klink_t klink;
380
381 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
382 kegfn(klink->kl_keg);
383}
384
385/*
386 * Routine called by timeout which is used to fire off some time interval
387 * based calculations. (stats, hash size, etc.)
388 *
389 * Arguments:
390 * arg Unused
391 *
392 * Returns:
393 * Nothing
394 */
395static void
396uma_timeout(void *unused)
397{
398 bucket_enable();
399 zone_foreach(zone_timeout);
400
401 /* Reschedule this event */
402 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
403}
404
405/*
406 * Routine to perform timeout driven calculations. This expands the
407 * hashes and does per cpu statistics aggregation.
408 *
409 * Returns nothing.
410 */
411static void
412keg_timeout(uma_keg_t keg)
413{
414
415 KEG_LOCK(keg);
416 /*
417 * Expand the keg hash table.
418 *
419 * This is done if the number of slabs is larger than the hash size.
 420 * What I'm trying to do here is eliminate collisions entirely. This
421 * may be a little aggressive. Should I allow for two collisions max?
422 */
423 if (keg->uk_flags & UMA_ZONE_HASH &&
424 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
425 struct uma_hash newhash;
426 struct uma_hash oldhash;
427 int ret;
428
429 /*
430 * This is so involved because allocating and freeing
431 * while the keg lock is held will lead to deadlock.
432 * I have to do everything in stages and check for
433 * races.
434 */
435 newhash = keg->uk_hash;
436 KEG_UNLOCK(keg);
437 ret = hash_alloc(&newhash);
438 KEG_LOCK(keg);
439 if (ret) {
440 if (hash_expand(&keg->uk_hash, &newhash)) {
441 oldhash = keg->uk_hash;
442 keg->uk_hash = newhash;
443 } else
444 oldhash = newhash;
445
446 KEG_UNLOCK(keg);
447 hash_free(&oldhash);
448 KEG_LOCK(keg);
449 }
450 }
451 KEG_UNLOCK(keg);
452}
453
454static void
455zone_timeout(uma_zone_t zone)
456{
457
458 zone_foreach_keg(zone, &keg_timeout);
459}
460
461/*
462 * Allocate and zero fill the next sized hash table from the appropriate
463 * backing store.
464 *
465 * Arguments:
466 * hash A new hash structure with the old hash size in uh_hashsize
467 *
468 * Returns:
 469 * 1 on success and 0 on failure.
470 */
471static int
472hash_alloc(struct uma_hash *hash)
473{
474 int oldsize;
475 int alloc;
476
477 oldsize = hash->uh_hashsize;
478
479 /* We're just going to go to a power of two greater */
480 if (oldsize) {
481 hash->uh_hashsize = oldsize * 2;
482 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
483 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
484 M_UMAHASH, M_NOWAIT);
485 } else {
486 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
487 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
488 M_WAITOK);
489 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
490 }
491 if (hash->uh_slab_hash) {
492 bzero(hash->uh_slab_hash, alloc);
493 hash->uh_hashmask = hash->uh_hashsize - 1;
494 return (1);
495 }
496
497 return (0);
498}
499
500/*
501 * Expands the hash table for HASH zones. This is done from zone_timeout
502 * to reduce collisions. This must not be done in the regular allocation
503 * path, otherwise, we can recurse on the vm while allocating pages.
504 *
505 * Arguments:
506 * oldhash The hash you want to expand
507 * newhash The hash structure for the new table
508 *
509 * Returns:
 510 * 1 if the entries were moved into the new table, 0 if nothing was done.
513 */
514static int
515hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
516{
517 uma_slab_t slab;
518 int hval;
519 int i;
520
521 if (!newhash->uh_slab_hash)
522 return (0);
523
524 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
525 return (0);
526
527 /*
528 * I need to investigate hash algorithms for resizing without a
529 * full rehash.
530 */
531
532 for (i = 0; i < oldhash->uh_hashsize; i++)
533 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
534 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
535 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
536 hval = UMA_HASH(newhash, slab->us_data);
537 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
538 slab, us_hlink);
539 }
540
541 return (1);
542}
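/*
 * Note that the expansion above is a full rehash: every slab is re-keyed
 * by its us_data address into the wider table, so the cost is linear in
 * the number of slabs.  The old bucket array is swapped out and freed by
 * the caller (keg_timeout) once the move has succeeded.
 */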
543
544/*
545 * Free the hash bucket to the appropriate backing store.
546 *
547 * Arguments:
 548 * hash The hash structure whose slab_hash bucket array is being freed
550 *
551 * Returns:
552 * Nothing
553 */
554static void
555hash_free(struct uma_hash *hash)
556{
557 if (hash->uh_slab_hash == NULL)
558 return;
559 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
560 zone_free_item(hashzone,
561 hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
562 else
563 free(hash->uh_slab_hash, M_UMAHASH);
564}
565
566/*
567 * Frees all outstanding items in a bucket
568 *
569 * Arguments:
570 * zone The zone to free to, must be unlocked.
571 * bucket The free/alloc bucket with items, cpu queue must be locked.
572 *
573 * Returns:
574 * Nothing
575 */
576
577static void
578bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
579{
580 void *item;
581
582 if (bucket == NULL)
583 return;
584
585 while (bucket->ub_cnt > 0) {
586 bucket->ub_cnt--;
587 item = bucket->ub_bucket[bucket->ub_cnt];
588#ifdef INVARIANTS
589 bucket->ub_bucket[bucket->ub_cnt] = NULL;
590 KASSERT(item != NULL,
591 ("bucket_drain: botched ptr, item is NULL"));
592#endif
593 zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
594 }
595}
596
597/*
598 * Drains the per cpu caches for a zone.
599 *
 600 * NOTE: This may only be called while the zone is being torn down, and not
601 * during normal operation. This is necessary in order that we do not have
602 * to migrate CPUs to drain the per-CPU caches.
603 *
604 * Arguments:
605 * zone The zone to drain, must be unlocked.
606 *
607 * Returns:
608 * Nothing
609 */
610static void
611cache_drain(uma_zone_t zone)
612{
613 uma_cache_t cache;
614 int cpu;
615
616 /*
617 * XXX: It is safe to not lock the per-CPU caches, because we're
618 * tearing down the zone anyway. I.e., there will be no further use
619 * of the caches at this point.
620 *
 621 * XXX: It would be good to be able to assert that the zone is being
622 * torn down to prevent improper use of cache_drain().
623 *
624 * XXX: We lock the zone before passing into bucket_cache_drain() as
625 * it is used elsewhere. Should the tear-down path be made special
626 * there in some form?
627 */
628 CPU_FOREACH(cpu) {
629 cache = &zone->uz_cpu[cpu];
630 bucket_drain(zone, cache->uc_allocbucket);
631 bucket_drain(zone, cache->uc_freebucket);
632 if (cache->uc_allocbucket != NULL)
633 bucket_free(cache->uc_allocbucket);
634 if (cache->uc_freebucket != NULL)
635 bucket_free(cache->uc_freebucket);
636 cache->uc_allocbucket = cache->uc_freebucket = NULL;
637 }
638 ZONE_LOCK(zone);
639 bucket_cache_drain(zone);
640 ZONE_UNLOCK(zone);
641}
642
643/*
644 * Drain the cached buckets from a zone. Expects a locked zone on entry.
645 */
646static void
647bucket_cache_drain(uma_zone_t zone)
648{
649 uma_bucket_t bucket;
650
651 /*
652 * Drain the bucket queues and free the buckets, we just keep two per
653 * cpu (alloc/free).
654 */
655 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
656 LIST_REMOVE(bucket, ub_link);
657 ZONE_UNLOCK(zone);
658 bucket_drain(zone, bucket);
659 bucket_free(bucket);
660 ZONE_LOCK(zone);
661 }
662
663 /* Now we do the free queue.. */
664 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
665 LIST_REMOVE(bucket, ub_link);
666 bucket_free(bucket);
667 }
668}
669
670/*
671 * Frees pages from a keg back to the system. This is done on demand from
672 * the pageout daemon.
673 *
674 * Returns nothing.
675 */
676static void
677keg_drain(uma_keg_t keg)
678{
679 struct slabhead freeslabs = { 0 };
680 uma_slab_t slab;
681 uma_slab_t n;
682 u_int8_t flags;
683 u_int8_t *mem;
684 int i;
685
686 /*
687 * We don't want to take pages from statically allocated kegs at this
688 * time
689 */
690 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
691 return;
692
693#ifdef UMA_DEBUG
694 printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
695#endif
696 KEG_LOCK(keg);
697 if (keg->uk_free == 0)
698 goto finished;
699
700 slab = LIST_FIRST(&keg->uk_free_slab);
701 while (slab) {
702 n = LIST_NEXT(slab, us_link);
703
 704 		/* We have nowhere to free these to */
705 if (slab->us_flags & UMA_SLAB_BOOT) {
706 slab = n;
707 continue;
708 }
709
710 LIST_REMOVE(slab, us_link);
711 keg->uk_pages -= keg->uk_ppera;
712 keg->uk_free -= keg->uk_ipers;
713
714 if (keg->uk_flags & UMA_ZONE_HASH)
715 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
716
717 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
718
719 slab = n;
720 }
721finished:
722 KEG_UNLOCK(keg);
723
724 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
725 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
726 if (keg->uk_fini)
727 for (i = 0; i < keg->uk_ipers; i++)
728 keg->uk_fini(
729 slab->us_data + (keg->uk_rsize * i),
730 keg->uk_size);
731 flags = slab->us_flags;
732 mem = slab->us_data;
733
734 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
735 vm_object_t obj;
736
737 if (flags & UMA_SLAB_KMEM)
738 obj = kmem_object;
739 else if (flags & UMA_SLAB_KERNEL)
740 obj = kernel_object;
741 else
742 obj = NULL;
743 for (i = 0; i < keg->uk_ppera; i++)
744 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
745 obj);
746 }
747 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
748 zone_free_item(keg->uk_slabzone, slab, NULL,
749 SKIP_NONE, ZFREE_STATFREE);
750#ifdef UMA_DEBUG
751 printf("%s: Returning %d bytes.\n",
752 keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
753#endif
754 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
755 }
756}
757
758static void
759zone_drain_wait(uma_zone_t zone, int waitok)
760{
761
762 /*
763 * Set draining to interlock with zone_dtor() so we can release our
764 * locks as we go. Only dtor() should do a WAITOK call since it
765 * is the only call that knows the structure will still be available
766 * when it wakes up.
767 */
768 ZONE_LOCK(zone);
769 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
770 if (waitok == M_NOWAIT)
771 goto out;
772 mtx_unlock(&uma_mtx);
773 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
774 mtx_lock(&uma_mtx);
775 }
776 zone->uz_flags |= UMA_ZFLAG_DRAINING;
777 bucket_cache_drain(zone);
778 ZONE_UNLOCK(zone);
779 /*
780 * The DRAINING flag protects us from being freed while
781 * we're running. Normally the uma_mtx would protect us but we
782 * must be able to release and acquire the right lock for each keg.
783 */
784 zone_foreach_keg(zone, &keg_drain);
785 ZONE_LOCK(zone);
786 zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
787 wakeup(zone);
788out:
789 ZONE_UNLOCK(zone);
790}
791
792void
793zone_drain(uma_zone_t zone)
794{
795
796 zone_drain_wait(zone, M_NOWAIT);
797}
798
799/*
800 * Allocate a new slab for a keg. This does not insert the slab onto a list.
801 *
802 * Arguments:
803 * wait Shall we wait?
804 *
805 * Returns:
806 * The slab that was allocated or NULL if there is no memory and the
807 * caller specified M_NOWAIT.
808 */
809static uma_slab_t
810keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
811{
812 uma_slabrefcnt_t slabref;
813 uma_alloc allocf;
814 uma_slab_t slab;
815 u_int8_t *mem;
816 u_int8_t flags;
817 int i;
818
819 mtx_assert(&keg->uk_lock, MA_OWNED);
820 slab = NULL;
821
822#ifdef UMA_DEBUG
823 printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
824#endif
825 allocf = keg->uk_allocf;
826 KEG_UNLOCK(keg);
827
828 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
829 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
830 if (slab == NULL) {
831 KEG_LOCK(keg);
832 return NULL;
833 }
834 }
835
836 /*
837 * This reproduces the old vm_zone behavior of zero filling pages the
838 * first time they are added to a zone.
839 *
840 * Malloced items are zeroed in uma_zalloc.
841 */
842
843 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
844 wait |= M_ZERO;
845 else
846 wait &= ~M_ZERO;
847
848 /* zone is passed for legacy reasons. */
849 mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
850 if (mem == NULL) {
851 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
852 zone_free_item(keg->uk_slabzone, slab, NULL,
853 SKIP_NONE, ZFREE_STATFREE);
854 KEG_LOCK(keg);
855 return (NULL);
856 }
857
858 /* Point the slab into the allocated memory */
859 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
860 slab = (uma_slab_t )(mem + keg->uk_pgoff);
861
862 if (keg->uk_flags & UMA_ZONE_VTOSLAB)
863 for (i = 0; i < keg->uk_ppera; i++)
864 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
865
866 slab->us_keg = keg;
867 slab->us_data = mem;
868 slab->us_freecount = keg->uk_ipers;
869 slab->us_firstfree = 0;
870 slab->us_flags = flags;
871
872 if (keg->uk_flags & UMA_ZONE_REFCNT) {
873 slabref = (uma_slabrefcnt_t)slab;
874 for (i = 0; i < keg->uk_ipers; i++) {
875 slabref->us_freelist[i].us_refcnt = 0;
876 slabref->us_freelist[i].us_item = i+1;
877 }
878 } else {
879 for (i = 0; i < keg->uk_ipers; i++)
880 slab->us_freelist[i].us_item = i+1;
881 }
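	/*
	 * The free list is now encoded purely in indices: us_firstfree
	 * (set to 0 above) names the first free item and each us_item
	 * entry names the next one, with the last entry pointing one
	 * past the end of the slab.
	 */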
882
883 if (keg->uk_init != NULL) {
884 for (i = 0; i < keg->uk_ipers; i++)
885 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
886 keg->uk_size, wait) != 0)
887 break;
888 if (i != keg->uk_ipers) {
889 if (keg->uk_fini != NULL) {
890 for (i--; i > -1; i--)
891 keg->uk_fini(slab->us_data +
892 (keg->uk_rsize * i),
893 keg->uk_size);
894 }
895 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
896 vm_object_t obj;
897
898 if (flags & UMA_SLAB_KMEM)
899 obj = kmem_object;
900 else if (flags & UMA_SLAB_KERNEL)
901 obj = kernel_object;
902 else
903 obj = NULL;
904 for (i = 0; i < keg->uk_ppera; i++)
905 vsetobj((vm_offset_t)mem +
906 (i * PAGE_SIZE), obj);
907 }
908 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
909 zone_free_item(keg->uk_slabzone, slab,
910 NULL, SKIP_NONE, ZFREE_STATFREE);
911 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
912 flags);
913 KEG_LOCK(keg);
914 return (NULL);
915 }
916 }
917 KEG_LOCK(keg);
918
919 if (keg->uk_flags & UMA_ZONE_HASH)
920 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
921
922 keg->uk_pages += keg->uk_ppera;
923 keg->uk_free += keg->uk_ipers;
924
925 return (slab);
926}
927
928/*
929 * This function is intended to be used early on in place of page_alloc() so
930 * that we may use the boot time page cache to satisfy allocations before
931 * the VM is ready.
932 */
933static void *
934startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
935{
936 uma_keg_t keg;
937 uma_slab_t tmps;
938 int pages, check_pages;
939
940 keg = zone_first_keg(zone);
941 pages = howmany(bytes, PAGE_SIZE);
942 check_pages = pages - 1;
943 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
944
945 /*
946 * Check our small startup cache to see if it has pages remaining.
947 */
948 mtx_lock(&uma_boot_pages_mtx);
949
950 /* First check if we have enough room. */
951 tmps = LIST_FIRST(&uma_boot_pages);
952 while (tmps != NULL && check_pages-- > 0)
953 tmps = LIST_NEXT(tmps, us_link);
954 if (tmps != NULL) {
955 /*
956 * It's ok to lose tmps references. The last one will
957 * have tmps->us_data pointing to the start address of
958 * "pages" contiguous pages of memory.
959 */
960 while (pages-- > 0) {
961 tmps = LIST_FIRST(&uma_boot_pages);
962 LIST_REMOVE(tmps, us_link);
963 }
964 mtx_unlock(&uma_boot_pages_mtx);
965 *pflag = tmps->us_flags;
966 return (tmps->us_data);
967 }
968 mtx_unlock(&uma_boot_pages_mtx);
969 if (booted < UMA_STARTUP2)
970 panic("UMA: Increase vm.boot_pages");
971 /*
972 * Now that we've booted reset these users to their real allocator.
973 */
974#ifdef UMA_MD_SMALL_ALLOC
975 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
976#else
977 keg->uk_allocf = page_alloc;
978#endif
979 return keg->uk_allocf(zone, bytes, pflag, wait);
980}
981
982/*
983 * Allocates a number of pages from the system
984 *
985 * Arguments:
986 * bytes The number of bytes requested
987 * wait Shall we wait?
988 *
989 * Returns:
 990 * A pointer to the allocated memory or possibly
991 * NULL if M_NOWAIT is set.
992 */
993static void *
994page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
995{
996 void *p; /* Returned page */
997
998 *pflag = UMA_SLAB_KMEM;
999 p = (void *) kmem_malloc(kmem_map, bytes, wait);
1000
1001 return (p);
1002}
1003
1004/*
1005 * Allocates a number of pages from within an object
1006 *
1007 * Arguments:
1008 * bytes The number of bytes requested
1009 * wait Shall we wait?
1010 *
1011 * Returns:
 1012 * A pointer to the allocated memory or possibly
1013 * NULL if M_NOWAIT is set.
1014 */
1015static void *
1016obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1017{
1018 vm_object_t object;
1019 vm_offset_t retkva, zkva;
1020 vm_page_t p;
1021 int pages, startpages;
1022 uma_keg_t keg;
1023
1024 keg = zone_first_keg(zone);
1025 object = keg->uk_obj;
1026 retkva = 0;
1027
1028 /*
1029 * This looks a little weird since we're getting one page at a time.
1030 */
1031 VM_OBJECT_LOCK(object);
1032 p = TAILQ_LAST(&object->memq, pglist);
1033 pages = p != NULL ? p->pindex + 1 : 0;
1034 startpages = pages;
1035 zkva = keg->uk_kva + pages * PAGE_SIZE;
1036 for (; bytes > 0; bytes -= PAGE_SIZE) {
1037 p = vm_page_alloc(object, pages,
1038 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
1039 if (p == NULL) {
1040 if (pages != startpages)
1041 pmap_qremove(retkva, pages - startpages);
1042 while (pages != startpages) {
1043 pages--;
1044 p = TAILQ_LAST(&object->memq, pglist);
1045 vm_page_unwire(p, 0);
1046 vm_page_free(p);
1047 }
1048 retkva = 0;
1049 goto done;
1050 }
1051 pmap_qenter(zkva, &p, 1);
1052 if (retkva == 0)
1053 retkva = zkva;
1054 zkva += PAGE_SIZE;
1055 pages += 1;
1056 }
1057done:
1058 VM_OBJECT_UNLOCK(object);
1059 *flags = UMA_SLAB_PRIV;
1060
1061 return ((void *)retkva);
1062}
1063
1064/*
1065 * Frees a number of pages to the system
1066 *
1067 * Arguments:
1068 * mem A pointer to the memory to be freed
1069 * size The size of the memory being freed
1070 * flags The original p->us_flags field
1071 *
1072 * Returns:
1073 * Nothing
1074 */
1075static void
1076page_free(void *mem, int size, u_int8_t flags)
1077{
1078 vm_map_t map;
1079
1080 if (flags & UMA_SLAB_KMEM)
1081 map = kmem_map;
1082 else if (flags & UMA_SLAB_KERNEL)
1083 map = kernel_map;
1084 else
1085 panic("UMA: page_free used with invalid flags %d", flags);
1086
1087 kmem_free(map, (vm_offset_t)mem, size);
1088}
1089
1090/*
1091 * Zero fill initializer
1092 *
1093 * Arguments/Returns follow uma_init specifications
1094 */
1095static int
1096zero_init(void *mem, int size, int flags)
1097{
1098 bzero(mem, size);
1099 return (0);
1100}
1101
1102/*
1103 * Finish creating a small uma keg. This calculates ipers, and the keg size.
1104 *
1105 * Arguments
 1106 * keg The keg we should initialize
1107 *
1108 * Returns
1109 * Nothing
1110 */
1111static void
1112keg_small_init(uma_keg_t keg)
1113{
1114 u_int rsize;
1115 u_int memused;
1116 u_int wastedspace;
1117 u_int shsize;
1118
1119 KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
1120 rsize = keg->uk_size;
1121
1122 if (rsize < UMA_SMALLEST_UNIT)
1123 rsize = UMA_SMALLEST_UNIT;
1124 if (rsize & keg->uk_align)
1125 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1126
1127 keg->uk_rsize = rsize;
1128 keg->uk_ppera = 1;
1129
1130 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1131 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
1132 shsize = sizeof(struct uma_slab_refcnt);
1133 } else {
1134 rsize += UMA_FRITM_SZ; /* Account for linkage */
1135 shsize = sizeof(struct uma_slab);
1136 }
1137
1138 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1139 KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
1140 memused = keg->uk_ipers * rsize + shsize;
1141 wastedspace = UMA_SLAB_SIZE - memused;
1142
1143 /*
1144 * We can't do OFFPAGE if we're internal or if we've been
1145 * asked to not go to the VM for buckets. If we do this we
1146 * may end up going to the VM (kmem_map) for slabs which we
1147 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1148 * result of UMA_ZONE_VM, which clearly forbids it.
1149 */
1150 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1151 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1152 return;
1153
1154 if ((wastedspace >= UMA_MAX_WASTE) &&
1155 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1156 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1157 KASSERT(keg->uk_ipers <= 255,
1158 ("keg_small_init: keg->uk_ipers too high!"));
1159#ifdef UMA_DEBUG
1160 printf("UMA decided we need offpage slab headers for "
1161 "keg: %s, calculated wastedspace = %d, "
1162 "maximum wasted space allowed = %d, "
1163 "calculated ipers = %d, "
1164 "new wasted space = %d\n", keg->uk_name, wastedspace,
1165 UMA_MAX_WASTE, keg->uk_ipers,
1166 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1167#endif
1168 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1169 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1170 keg->uk_flags |= UMA_ZONE_HASH;
1171 }
1172}
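/*
 * Worked example, illustrative only and assuming UMA_SLAB_SIZE == PAGE_SIZE
 * == 4096 with 8-byte pointer alignment (the real constants live with the
 * slab layout definitions): a 252-byte item rounds up to uk_rsize = 256, so
 * the in-page layout holds
 *
 *	ipers = (4096 - sizeof(struct uma_slab)) / (256 + UMA_FRITM_SZ)
 *
 * items.  Only if the resulting wastedspace reaches UMA_MAX_WASTE *and* the
 * header-free layout fits strictly more items does the keg switch to
 * OFFPAGE, where it would hold 4096 / 256 == 16 items per slab.
 */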
1173
1174/*
1175 * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1176 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1177 * more complicated.
1178 *
1179 * Arguments
1180 * keg The keg we should initialize
1181 *
1182 * Returns
1183 * Nothing
1184 */
1185static void
1186keg_large_init(uma_keg_t keg)
1187{
1188 int pages;
1189
1190 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1191 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1192 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1193
1194 pages = keg->uk_size / UMA_SLAB_SIZE;
1195
1196 /* Account for remainder */
1197 if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1198 pages++;
1199
1200 keg->uk_ppera = pages;
1201 keg->uk_ipers = 1;
1202 keg->uk_rsize = keg->uk_size;
1203
1204 /* We can't do OFFPAGE if we're internal, bail out here. */
1205 if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1206 return;
1207
1208 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1209 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1210 keg->uk_flags |= UMA_ZONE_HASH;
1211}
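/*
 * Example, illustrative only and assuming UMA_SLAB_SIZE == 4096: a 20 KB
 * item gives pages = 5 and a single item per five-page slab, with the slab
 * header kept OFFPAGE unless the keg is internal.
 */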
1212
1213static void
1214keg_cachespread_init(uma_keg_t keg)
1215{
1216 int alignsize;
1217 int trailer;
1218 int pages;
1219 int rsize;
1220
1221 alignsize = keg->uk_align + 1;
1222 rsize = keg->uk_size;
1223 /*
1224 * We want one item to start on every align boundary in a page. To
1225 * do this we will span pages. We will also extend the item by the
1226 * size of align if it is an even multiple of align. Otherwise, it
1227 * would fall on the same boundary every time.
1228 */
1229 if (rsize & keg->uk_align)
1230 rsize = (rsize & ~keg->uk_align) + alignsize;
1231 if ((rsize & alignsize) == 0)
1232 rsize += alignsize;
1233 trailer = rsize - keg->uk_size;
1234 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1235 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1236 keg->uk_rsize = rsize;
1237 keg->uk_ppera = pages;
1238 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1239 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1240 KASSERT(keg->uk_ipers <= uma_max_ipers,
1241 ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
1242 keg->uk_ipers));
1243}
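/*
 * Example, illustrative only and assuming PAGE_SIZE == 4096 with 64-byte
 * cache-line alignment (alignsize == 64): a 192-byte item keeps rsize = 192,
 * an odd multiple of the line size, so pages = (192 * 64) / 4096 = 3 and
 * ipers = (3 * 4096) / 192 = 64.  Stepping three lines per item, the 64
 * items of one slab start on each of the 64 line offsets within a page
 * exactly once, which is the spreading this layout is after.
 */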
1244
1245/*
1246 * Keg header ctor. This initializes all fields, locks, etc., and inserts
1247 * the keg onto the global keg list.
1248 *
1249 * Arguments/Returns follow uma_ctor specifications
1250 * udata Actually uma_kctor_args
1251 */
1252static int
1253keg_ctor(void *mem, int size, void *udata, int flags)
1254{
1255 struct uma_kctor_args *arg = udata;
1256 uma_keg_t keg = mem;
1257 uma_zone_t zone;
1258
1259 bzero(keg, size);
1260 keg->uk_size = arg->size;
1261 keg->uk_init = arg->uminit;
1262 keg->uk_fini = arg->fini;
1263 keg->uk_align = arg->align;
1264 keg->uk_free = 0;
1265 keg->uk_pages = 0;
1266 keg->uk_flags = arg->flags;
1267 keg->uk_allocf = page_alloc;
1268 keg->uk_freef = page_free;
1269 keg->uk_recurse = 0;
1270 keg->uk_slabzone = NULL;
1271
1272 /*
1273 * The master zone is passed to us at keg-creation time.
1274 */
1275 zone = arg->zone;
1276 keg->uk_name = zone->uz_name;
1277
1278 if (arg->flags & UMA_ZONE_VM)
1279 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1280
1281 if (arg->flags & UMA_ZONE_ZINIT)
1282 keg->uk_init = zero_init;
1283
1284 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1285 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1286
1287 /*
1288 * The +UMA_FRITM_SZ added to uk_size is to account for the
1289 * linkage that is added to the size in keg_small_init(). If
1290 * we don't account for this here then we may end up in
1291 * keg_small_init() with a calculated 'ipers' of 0.
1292 */
1293 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1294 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1295 keg_cachespread_init(keg);
1296 else if ((keg->uk_size+UMA_FRITMREF_SZ) >
1297 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1298 keg_large_init(keg);
1299 else
1300 keg_small_init(keg);
1301 } else {
1302 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1303 keg_cachespread_init(keg);
1304 else if ((keg->uk_size+UMA_FRITM_SZ) >
1305 (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1306 keg_large_init(keg);
1307 else
1308 keg_small_init(keg);
1309 }
1310
1311 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1312 if (keg->uk_flags & UMA_ZONE_REFCNT)
1313 keg->uk_slabzone = slabrefzone;
1314 else
1315 keg->uk_slabzone = slabzone;
1316 }
1317
1318 /*
1319 * If we haven't booted yet we need allocations to go through the
1320 * startup cache until the vm is ready.
1321 */
1322 if (keg->uk_ppera == 1) {
1323#ifdef UMA_MD_SMALL_ALLOC
1324 keg->uk_allocf = uma_small_alloc;
1325 keg->uk_freef = uma_small_free;
1326
1327 if (booted < UMA_STARTUP)
1328 keg->uk_allocf = startup_alloc;
1329#else
1330 if (booted < UMA_STARTUP2)
1331 keg->uk_allocf = startup_alloc;
1332#endif
1333 } else if (booted < UMA_STARTUP2 &&
1334 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1335 keg->uk_allocf = startup_alloc;
1336
1337 /*
1338 * Initialize keg's lock (shared among zones).
1339 */
1340 if (arg->flags & UMA_ZONE_MTXCLASS)
1341 KEG_LOCK_INIT(keg, 1);
1342 else
1343 KEG_LOCK_INIT(keg, 0);
1344
1345 /*
1346 * If we're putting the slab header in the actual page we need to
1347 * figure out where in each page it goes. This calculates a right
1348 * justified offset into the memory on an ALIGN_PTR boundary.
1349 */
1350 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1351 u_int totsize;
1352
1353 /* Size of the slab struct and free list */
1354 if (keg->uk_flags & UMA_ZONE_REFCNT)
1355 totsize = sizeof(struct uma_slab_refcnt) +
1356 keg->uk_ipers * UMA_FRITMREF_SZ;
1357 else
1358 totsize = sizeof(struct uma_slab) +
1359 keg->uk_ipers * UMA_FRITM_SZ;
1360
1361 if (totsize & UMA_ALIGN_PTR)
1362 totsize = (totsize & ~UMA_ALIGN_PTR) +
1363 (UMA_ALIGN_PTR + 1);
1364 keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
1365
1366 if (keg->uk_flags & UMA_ZONE_REFCNT)
1367 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1368 + keg->uk_ipers * UMA_FRITMREF_SZ;
1369 else
1370 totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1371 + keg->uk_ipers * UMA_FRITM_SZ;
1372
1373 /*
1374 * The only way the following is possible is if with our
1375 * UMA_ALIGN_PTR adjustments we are now bigger than
1376 * UMA_SLAB_SIZE. I haven't checked whether this is
1377 * mathematically possible for all cases, so we make
1378 * sure here anyway.
1379 */
1380 if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
1381 printf("zone %s ipers %d rsize %d size %d\n",
1382 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1383 keg->uk_size);
1384 panic("UMA slab won't fit.");
1385 }
1386 }
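	/*
	 * In other words, uk_pgoff right-justifies the header: the slab
	 * struct plus its free-list index array, padded up to a pointer
	 * boundary, sits flush against the end of the slab's last page
	 * with the items packed in front of it.
	 */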
1387
1388 if (keg->uk_flags & UMA_ZONE_HASH)
1389 hash_alloc(&keg->uk_hash);
1390
1391#ifdef UMA_DEBUG
1392 printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
1393 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1394 keg->uk_ipers, keg->uk_ppera,
1395 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1396#endif
1397
1398 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1399
1400 mtx_lock(&uma_mtx);
1401 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1402 mtx_unlock(&uma_mtx);
1403 return (0);
1404}
1405
1406/*
1407 * Zone header ctor. This initializes all fields, locks, etc.
1408 *
1409 * Arguments/Returns follow uma_ctor specifications
1410 * udata Actually uma_zctor_args
1411 */
1412static int
1413zone_ctor(void *mem, int size, void *udata, int flags)
1414{
1415 struct uma_zctor_args *arg = udata;
1416 uma_zone_t zone = mem;
1417 uma_zone_t z;
1418 uma_keg_t keg;
1419
1420 bzero(zone, size);
1421 zone->uz_name = arg->name;
1422 zone->uz_ctor = arg->ctor;
1423 zone->uz_dtor = arg->dtor;
1424 zone->uz_slab = zone_fetch_slab;
1425 zone->uz_init = NULL;
1426 zone->uz_fini = NULL;
1427 zone->uz_allocs = 0;
1428 zone->uz_frees = 0;
1429 zone->uz_fails = 0;
1430 zone->uz_sleeps = 0;
1431 zone->uz_fills = zone->uz_count = 0;
1432 zone->uz_flags = 0;
1433 keg = arg->keg;
1434
1435 if (arg->flags & UMA_ZONE_SECONDARY) {
1436 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1437 zone->uz_init = arg->uminit;
1438 zone->uz_fini = arg->fini;
1439 zone->uz_lock = &keg->uk_lock;
1440 zone->uz_flags |= UMA_ZONE_SECONDARY;
1441 mtx_lock(&uma_mtx);
1442 ZONE_LOCK(zone);
1443 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1444 if (LIST_NEXT(z, uz_link) == NULL) {
1445 LIST_INSERT_AFTER(z, zone, uz_link);
1446 break;
1447 }
1448 }
1449 ZONE_UNLOCK(zone);
1450 mtx_unlock(&uma_mtx);
1451 } else if (keg == NULL) {
1452 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1453 arg->align, arg->flags)) == NULL)
1454 return (ENOMEM);
1455 } else {
1456 struct uma_kctor_args karg;
1457 int error;
1458
1459 /* We should only be here from uma_startup() */
1460 karg.size = arg->size;
1461 karg.uminit = arg->uminit;
1462 karg.fini = arg->fini;
1463 karg.align = arg->align;
1464 karg.flags = arg->flags;
1465 karg.zone = zone;
1466 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1467 flags);
1468 if (error)
1469 return (error);
1470 }
1471 /*
1472 * Link in the first keg.
1473 */
1474 zone->uz_klink.kl_keg = keg;
1475 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1476 zone->uz_lock = &keg->uk_lock;
1477 zone->uz_size = keg->uk_size;
1478 zone->uz_flags |= (keg->uk_flags &
1479 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1480
1481 /*
1482 * Some internal zones don't have room allocated for the per cpu
1483 * caches. If we're internal, bail out here.
1484 */
1485 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1486 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1487 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1488 return (0);
1489 }
1490
1491 if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1492 zone->uz_count = BUCKET_MAX;
1493 else if (keg->uk_ipers <= BUCKET_MAX)
1494 zone->uz_count = keg->uk_ipers;
1495 else
1496 zone->uz_count = BUCKET_MAX;
1497 return (0);
1498}
1499
1500/*
1501 * Keg header dtor. This frees all data, destroys locks, frees the hash
1502 * table and removes the keg from the global list.
1503 *
1504 * Arguments/Returns follow uma_dtor specifications
1505 * udata unused
1506 */
1507static void
1508keg_dtor(void *arg, int size, void *udata)
1509{
1510 uma_keg_t keg;
1511
1512 keg = (uma_keg_t)arg;
1513 KEG_LOCK(keg);
1514 if (keg->uk_free != 0) {
1515 printf("Freed UMA keg was not empty (%d items). "
1516 " Lost %d pages of memory.\n",
1517 keg->uk_free, keg->uk_pages);
1518 }
1519 KEG_UNLOCK(keg);
1520
1521 hash_free(&keg->uk_hash);
1522
1523 KEG_LOCK_FINI(keg);
1524}
1525
1526/*
1527 * Zone header dtor.
1528 *
1529 * Arguments/Returns follow uma_dtor specifications
1530 * udata unused
1531 */
1532static void
1533zone_dtor(void *arg, int size, void *udata)
1534{
1535 uma_klink_t klink;
1536 uma_zone_t zone;
1537 uma_keg_t keg;
1538
1539 zone = (uma_zone_t)arg;
1540 keg = zone_first_keg(zone);
1541
1542 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1543 cache_drain(zone);
1544
1545 mtx_lock(&uma_mtx);
1546 LIST_REMOVE(zone, uz_link);
1547 mtx_unlock(&uma_mtx);
1548 /*
1549 * XXX there are some races here where
1550 * the zone can be drained but zone lock
1551 * released and then refilled before we
 1552	 * remove it... we don't care for now
1553 */
1554 zone_drain_wait(zone, M_WAITOK);
1555 /*
1556 * Unlink all of our kegs.
1557 */
1558 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1559 klink->kl_keg = NULL;
1560 LIST_REMOVE(klink, kl_link);
1561 if (klink == &zone->uz_klink)
1562 continue;
1563 free(klink, M_TEMP);
1564 }
1565 /*
1566 * We only destroy kegs from non secondary zones.
1567 */
1568 if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1569 mtx_lock(&uma_mtx);
1570 LIST_REMOVE(keg, uk_link);
1571 mtx_unlock(&uma_mtx);
1572 zone_free_item(kegs, keg, NULL, SKIP_NONE,
1573 ZFREE_STATFREE);
1574 }
1575}
1576
1577/*
1578 * Traverses every zone in the system and calls a callback
1579 *
1580 * Arguments:
1581 * zfunc A pointer to a function which accepts a zone
1582 * as an argument.
1583 *
1584 * Returns:
1585 * Nothing
1586 */
1587static void
1588zone_foreach(void (*zfunc)(uma_zone_t))
1589{
1590 uma_keg_t keg;
1591 uma_zone_t zone;
1592
1593 mtx_lock(&uma_mtx);
1594 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1595 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1596 zfunc(zone);
1597 }
1598 mtx_unlock(&uma_mtx);
1599}
1600
1601/* Public functions */
1602/* See uma.h */
1603void
1604uma_startup(void *bootmem, int boot_pages)
1605{
1606 struct uma_zctor_args args;
1607 uma_slab_t slab;
1608 u_int slabsize;
1609 u_int objsize, totsize, wsize;
1610 int i;
1611
1612#ifdef UMA_DEBUG
1613 printf("Creating uma keg headers zone and keg.\n");
1614#endif
1615 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1616
1617 /*
1618 * Figure out the maximum number of items-per-slab we'll have if
1619 * we're using the OFFPAGE slab header to track free items, given
1620 * all possible object sizes and the maximum desired wastage
1621 * (UMA_MAX_WASTE).
1622 *
1623 * We iterate until we find an object size for
1624 * which the calculated wastage in keg_small_init() will be
1625 * enough to warrant OFFPAGE. Since wastedspace versus objsize
1626 * is an overall increasing see-saw function, we find the smallest
1627 * objsize such that the wastage is always acceptable for objects
1628 * with that objsize or smaller. Since a smaller objsize always
1629 * generates a larger possible uma_max_ipers, we use this computed
1630 * objsize to calculate the largest ipers possible. Since the
1631 * ipers calculated for OFFPAGE slab headers is always larger than
1632 * the ipers initially calculated in keg_small_init(), we use
1633 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1634 * obtain the maximum ipers possible for offpage slab headers.
1635 *
 1636	 * It should be noted that ipers versus objsize is an inversely
1637 * proportional function which drops off rather quickly so as
1638 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1639 * falls into the portion of the inverse relation AFTER the steep
1640 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1641 *
1642 * Note that we have 8-bits (1 byte) to use as a freelist index
1643 * inside the actual slab header itself and this is enough to
 1644	 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
1645 * object with offpage slab header would have ipers =
1646 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1647 * 1 greater than what our byte-integer freelist index can
 1648	 * accommodate, but we know that this situation never occurs as
1649 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1650 * that we need to go to offpage slab headers. Or, if we do,
1651 * then we trap that condition below and panic in the INVARIANTS case.
1652 */
1653 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1654 totsize = wsize;
1655 objsize = UMA_SMALLEST_UNIT;
1656 while (totsize >= wsize) {
1657 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1658 (objsize + UMA_FRITM_SZ);
1659 totsize *= (UMA_FRITM_SZ + objsize);
1660 objsize++;
1661 }
1662 if (objsize > UMA_SMALLEST_UNIT)
1663 objsize--;
1664 uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
1665
1666 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1667 totsize = wsize;
1668 objsize = UMA_SMALLEST_UNIT;
1669 while (totsize >= wsize) {
1670 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1671 (objsize + UMA_FRITMREF_SZ);
1672 totsize *= (UMA_FRITMREF_SZ + objsize);
1673 objsize++;
1674 }
1675 if (objsize > UMA_SMALLEST_UNIT)
1676 objsize--;
1677 uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
1678
1679 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1680 ("uma_startup: calculated uma_max_ipers values too large!"));
1681
1682#ifdef UMA_DEBUG
1683 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1684 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1685 uma_max_ipers_ref);
1686#endif
1687
1688 /* "manually" create the initial zone */
1689 args.name = "UMA Kegs";
1690 args.size = sizeof(struct uma_keg);
1691 args.ctor = keg_ctor;
1692 args.dtor = keg_dtor;
1693 args.uminit = zero_init;
1694 args.fini = NULL;
1695 args.keg = &masterkeg;
1696 args.align = 32 - 1;
1697 args.flags = UMA_ZFLAG_INTERNAL;
 1698	/* The initial zone has no per-CPU queues so it's smaller */
1699 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1700
1701#ifdef UMA_DEBUG
1702 printf("Filling boot free list.\n");
1703#endif
1704 for (i = 0; i < boot_pages; i++) {
1705 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1706 slab->us_data = (u_int8_t *)slab;
1707 slab->us_flags = UMA_SLAB_BOOT;
1708 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1709 }
1710 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1711
1712#ifdef UMA_DEBUG
1713 printf("Creating uma zone headers zone and keg.\n");
1714#endif
1715 args.name = "UMA Zones";
1716 args.size = sizeof(struct uma_zone) +
1717 (sizeof(struct uma_cache) * (mp_maxid + 1));
1718 args.ctor = zone_ctor;
1719 args.dtor = zone_dtor;
1720 args.uminit = zero_init;
1721 args.fini = NULL;
1722 args.keg = NULL;
1723 args.align = 32 - 1;
1724 args.flags = UMA_ZFLAG_INTERNAL;
 1725	/* The initial zone has no per-CPU queues so it's smaller */
1726 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1727
1728#ifdef UMA_DEBUG
1729 printf("Initializing pcpu cache locks.\n");
1730#endif
1731#ifdef UMA_DEBUG
1732 printf("Creating slab and hash zones.\n");
1733#endif
1734
1735 /*
1736 * This is the max number of free list items we'll have with
1737 * offpage slabs.
1738 */
1739 slabsize = uma_max_ipers * UMA_FRITM_SZ;
1740 slabsize += sizeof(struct uma_slab);
1741
1742 /* Now make a zone for slab headers */
1743 slabzone = uma_zcreate("UMA Slabs",
1744 slabsize,
1745 NULL, NULL, NULL, NULL,
1746 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1747
1748 /*
1749 * We also create a zone for the bigger slabs with reference
 1750	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1751 */
1752 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1753 slabsize += sizeof(struct uma_slab_refcnt);
1754 slabrefzone = uma_zcreate("UMA RCntSlabs",
1755 slabsize,
1756 NULL, NULL, NULL, NULL,
1757 UMA_ALIGN_PTR,
1758 UMA_ZFLAG_INTERNAL);
1759
1760 hashzone = uma_zcreate("UMA Hash",
1761 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1762 NULL, NULL, NULL, NULL,
1763 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1764
1765 bucket_init();
1766
1767 booted = UMA_STARTUP;
1768
1769#ifdef UMA_DEBUG
1770 printf("UMA startup complete.\n");
1771#endif
1772}
1773
1774/* see uma.h */
1775void
1776uma_startup2(void)
1777{
1778 booted = UMA_STARTUP2;
1779 bucket_enable();
1780#ifdef UMA_DEBUG
1781 printf("UMA startup2 complete.\n");
1782#endif
1783}
1784
1785/*
1786 * Initialize our callout handle
1787 *
1788 */
1789
1790static void
1791uma_startup3(void)
1792{
1793#ifdef UMA_DEBUG
1794 printf("Starting callout.\n");
1795#endif
1796 callout_init(&uma_callout, CALLOUT_MPSAFE);
1797 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1798#ifdef UMA_DEBUG
1799 printf("UMA startup3 complete.\n");
1800#endif
1801}
1802
1803static uma_keg_t
1804uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1805 int align, u_int32_t flags)
1806{
1807 struct uma_kctor_args args;
1808
1809 args.size = size;
1810 args.uminit = uminit;
1811 args.fini = fini;
1812 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1813 args.flags = flags;
1814 args.zone = zone;
1815 return (zone_alloc_item(kegs, &args, M_WAITOK));
1816}
1817
1818/* See uma.h */
1819void
1820uma_set_align(int align)
1821{
1822
1823 if (align != UMA_ALIGN_CACHE)
1824 uma_align_cache = align;
1825}
1826
1827/* See uma.h */
1828uma_zone_t
1829uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1830 uma_init uminit, uma_fini fini, int align, u_int32_t flags)
1831
1832{
1833 struct uma_zctor_args args;
1834
1835 /* This stuff is essential for the zone ctor */
1836 args.name = name;
1837 args.size = size;
1838 args.ctor = ctor;
1839 args.dtor = dtor;
1840 args.uminit = uminit;
1841 args.fini = fini;
1842 args.align = align;
1843 args.flags = flags;
1844 args.keg = NULL;
1845
1846 return (zone_alloc_item(zones, &args, M_WAITOK));
1847}
1848
1849/* See uma.h */
1850uma_zone_t
1851uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1852 uma_init zinit, uma_fini zfini, uma_zone_t master)
1853{
1854 struct uma_zctor_args args;
1855 uma_keg_t keg;
1856
1857 keg = zone_first_keg(master);
1858 args.name = name;
1859 args.size = keg->uk_size;
1860 args.ctor = ctor;
1861 args.dtor = dtor;
1862 args.uminit = zinit;
1863 args.fini = zfini;
1864 args.align = keg->uk_align;
1865 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1866 args.keg = keg;
1867
1868 /* XXX Attaches only one keg of potentially many. */
1869 return (zone_alloc_item(zones, &args, M_WAITOK));
1870}
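/*
 * Usage sketch (hypothetical consumer, not part of this file): a caller
 * that wants a second view of an existing zone's keg, with its own
 * ctor/dtor but the same item size and backing storage, would do
 * something like
 *
 *	foo_zone = uma_zsecond_create("foo cache", foo_ctor, foo_dtor,
 *	    NULL, NULL, master_zone);
 *
 * and may later attach further kegs of identical item size with
 * uma_zsecond_add().
 */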
1871
1872static void
1873zone_lock_pair(uma_zone_t a, uma_zone_t b)
1874{
1875 if (a < b) {
1876 ZONE_LOCK(a);
1877 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1878 } else {
1879 ZONE_LOCK(b);
1880 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1881 }
1882}
1883
1884static void
1885zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1886{
1887
1888 ZONE_UNLOCK(a);
1889 ZONE_UNLOCK(b);
1890}
1891
1892int
1893uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1894{
1895 uma_klink_t klink;
1896 uma_klink_t kl;
1897 int error;
1898
1899 error = 0;
1900 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1901
1902 zone_lock_pair(zone, master);
1903 /*
1904 * zone must use vtoslab() to resolve objects and must already be
1905 * a secondary.
1906 */
1907 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1908 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1909 error = EINVAL;
1910 goto out;
1911 }
1912 /*
1913 * The new master must also use vtoslab().
1914 */
 1915	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1916 error = EINVAL;
1917 goto out;
1918 }
1919 /*
1920 * Both must either be refcnt, or not be refcnt.
1921 */
1922 if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
1923 (master->uz_flags & UMA_ZONE_REFCNT)) {
1924 error = EINVAL;
1925 goto out;
1926 }
1927 /*
1928 * The underlying object must be the same size. rsize
1929 * may be different.
1930 */
1931 if (master->uz_size != zone->uz_size) {
1932 error = E2BIG;
1933 goto out;
1934 }
1935 /*
1936 * Put it at the end of the list.
1937 */
1938 klink->kl_keg = zone_first_keg(master);
1939 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1940 if (LIST_NEXT(kl, kl_link) == NULL) {
1941 LIST_INSERT_AFTER(kl, klink, kl_link);
1942 break;
1943 }
1944 }
1945 klink = NULL;
1946 zone->uz_flags |= UMA_ZFLAG_MULTI;
1947 zone->uz_slab = zone_fetch_slab_multi;
1948
1949out:
1950 zone_unlock_pair(zone, master);
1951 if (klink != NULL)
1952 free(klink, M_TEMP);
1953
1954 return (error);
1955}
1956
1957
1958/* See uma.h */
1959void
1960uma_zdestroy(uma_zone_t zone)
1961{
1962
1963 zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1964}
1965
1966/* See uma.h */
1967void *
1968uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1969{
1970 void *item;
1971 uma_cache_t cache;
1972 uma_bucket_t bucket;
1973 int cpu;
1974
1975 /* This is the fast path allocation */
1976#ifdef UMA_DEBUG_ALLOC_1
1977 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1978#endif
1979 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1980 zone->uz_name, flags);
1981
1982 if (flags & M_WAITOK) {
1983 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1984 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
1985 }
1981
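	/*
	 * When MEMGUARD elects to guard this zone, satisfy the request
	 * from the guarded map rather than from the zone itself.  The
	 * zone's init and ctor still run, but the INVARIANTS trash hooks
	 * are skipped since they conflict with MEMGUARD's own checking;
	 * if the guarded allocation fails we simply fall through to the
	 * normal path below.
	 */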
1986#ifdef DEBUG_MEMGUARD
1987 if (memguard_cmp_zone(zone)) {
1988 item = memguard_alloc(zone->uz_size, flags);
1989 if (item != NULL) {
1990 /*
1991 * Avoid conflict with the use-after-free
1992 * protecting infrastructure from INVARIANTS.
1993 */
1994 if (zone->uz_init != NULL &&
1995 zone->uz_init != mtrash_init &&
1996 zone->uz_init(item, zone->uz_size, flags) != 0)
1997 return (NULL);
1998 if (zone->uz_ctor != NULL &&
1999 zone->uz_ctor != mtrash_ctor &&
2000 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2001 zone->uz_fini(item, zone->uz_size);
2002 return (NULL);
2003 }
2004 return (item);
2005 }
2006 /* This is unfortunate but should not be fatal. */
2007 }
2008#endif
1982 /*
1983 * If possible, allocate from the per-CPU cache. There are two
1984 * requirements for safe access to the per-CPU cache: (1) the thread
1985 * accessing the cache must not be preempted or yield during access,
1986 * and (2) the thread must not migrate CPUs without switching which
1987 * cache it accesses. We rely on a critical section to prevent
1988 * preemption and migration. We release the critical section in
1989 * order to acquire the zone mutex if we are unable to allocate from
1990 * the current cache; when we re-acquire the critical section, we
1991 * must detect and handle migration if it has occurred.
1992 */
1993zalloc_restart:
1994 critical_enter();
1995 cpu = curcpu;
1996 cache = &zone->uz_cpu[cpu];
1997
1998zalloc_start:
1999 bucket = cache->uc_allocbucket;
2000
2001 if (bucket) {
2002 if (bucket->ub_cnt > 0) {
2003 bucket->ub_cnt--;
2004 item = bucket->ub_bucket[bucket->ub_cnt];
2005#ifdef INVARIANTS
2006 bucket->ub_bucket[bucket->ub_cnt] = NULL;
2007#endif
2008 KASSERT(item != NULL,
2009 ("uma_zalloc: Bucket pointer mangled."));
2010 cache->uc_allocs++;
2011 critical_exit();
2012#ifdef INVARIANTS
2013 ZONE_LOCK(zone);
2014 uma_dbg_alloc(zone, NULL, item);
2015 ZONE_UNLOCK(zone);
2016#endif
2017 if (zone->uz_ctor != NULL) {
2018 if (zone->uz_ctor(item, zone->uz_size,
2019 udata, flags) != 0) {
2020 zone_free_item(zone, item, udata,
2021 SKIP_DTOR, ZFREE_STATFAIL |
2022 ZFREE_STATFREE);
2023 return (NULL);
2024 }
2025 }
2026 if (flags & M_ZERO)
2027 bzero(item, zone->uz_size);
2028 return (item);
2029 } else if (cache->uc_freebucket) {
2030 /*
2031 * We have run out of items in our allocbucket.
2032 * See if we can switch with our free bucket.
2033 */
2034 if (cache->uc_freebucket->ub_cnt > 0) {
2035#ifdef UMA_DEBUG_ALLOC
2036 printf("uma_zalloc: Swapping empty with"
2037 " alloc.\n");
2038#endif
2039 bucket = cache->uc_freebucket;
2040 cache->uc_freebucket = cache->uc_allocbucket;
2041 cache->uc_allocbucket = bucket;
2042
2043 goto zalloc_start;
2044 }
2045 }
2046 }
2047 /*
2048	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2049 * we must go back to the zone. This requires the zone lock, so we
2050 * must drop the critical section, then re-acquire it when we go back
2051 * to the cache. Since the critical section is released, we may be
2052 * preempted or migrate. As such, make sure not to maintain any
2053 * thread-local state specific to the cache from prior to releasing
2054 * the critical section.
2055 */
2056 critical_exit();
2057 ZONE_LOCK(zone);
2058 critical_enter();
2059 cpu = curcpu;
2060 cache = &zone->uz_cpu[cpu];
2061 bucket = cache->uc_allocbucket;
2062 if (bucket != NULL) {
2063 if (bucket->ub_cnt > 0) {
2064 ZONE_UNLOCK(zone);
2065 goto zalloc_start;
2066 }
2067 bucket = cache->uc_freebucket;
2068 if (bucket != NULL && bucket->ub_cnt > 0) {
2069 ZONE_UNLOCK(zone);
2070 goto zalloc_start;
2071 }
2072 }
2073
2074 /* Since we have locked the zone we may as well send back our stats */
2075 zone->uz_allocs += cache->uc_allocs;
2076 cache->uc_allocs = 0;
2077 zone->uz_frees += cache->uc_frees;
2078 cache->uc_frees = 0;
2079
2080 /* Our old one is now a free bucket */
2081 if (cache->uc_allocbucket) {
2082 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
2083 ("uma_zalloc_arg: Freeing a non free bucket."));
2084 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2085 cache->uc_allocbucket, ub_link);
2086 cache->uc_allocbucket = NULL;
2087 }
2088
2089 /* Check the free list for a new alloc bucket */
2090 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
2091 KASSERT(bucket->ub_cnt != 0,
2092 ("uma_zalloc_arg: Returning an empty bucket."));
2093
2094 LIST_REMOVE(bucket, ub_link);
2095 cache->uc_allocbucket = bucket;
2096 ZONE_UNLOCK(zone);
2097 goto zalloc_start;
2098 }
2099 /* We are no longer associated with this CPU. */
2100 critical_exit();
2101
2102 /* Bump up our uz_count so we get here less */
2103 if (zone->uz_count < BUCKET_MAX)
2104 zone->uz_count++;
2105
2106 /*
2107	 * Now let's just fill a bucket and put it on the free list.  If that
2108	 * works we'll restart the allocation from the beginning.
2109 */
2110 if (zone_alloc_bucket(zone, flags)) {
2111 ZONE_UNLOCK(zone);
2112 goto zalloc_restart;
2113 }
2114 ZONE_UNLOCK(zone);
2115 /*
2116 * We may not be able to get a bucket so return an actual item.
2117 */
2118#ifdef UMA_DEBUG
2119 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2120#endif
2121
2122 item = zone_alloc_item(zone, udata, flags);
2123 return (item);
2124}
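/*
 * Illustrative sketch, not part of this file: the per-CPU cache access
 * discipline described in the comments above, reduced to its bare pattern.
 * Anything derived from curcpu is only valid inside the critical section;
 * once critical_exit() is called the thread may be preempted or migrate,
 * so "cpu" and "cache" must be re-derived before being used again.
 */
#if 0
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];
	/* ... touch only "cache" here; do not sleep or take sleepable locks ... */
	critical_exit();
	/* "cpu" and "cache" are stale from this point on. */
#endif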
2125
2126static uma_slab_t
2127keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2128{
2129 uma_slab_t slab;
2130
2131 mtx_assert(&keg->uk_lock, MA_OWNED);
2132 slab = NULL;
2133
2134 for (;;) {
2135 /*
2136 * Find a slab with some space. Prefer slabs that are partially
2137 * used over those that are totally full. This helps to reduce
2138 * fragmentation.
2139 */
2140 if (keg->uk_free != 0) {
2141 if (!LIST_EMPTY(&keg->uk_part_slab)) {
2142 slab = LIST_FIRST(&keg->uk_part_slab);
2143 } else {
2144 slab = LIST_FIRST(&keg->uk_free_slab);
2145 LIST_REMOVE(slab, us_link);
2146 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2147 us_link);
2148 }
2149 MPASS(slab->us_keg == keg);
2150 return (slab);
2151 }
2152
2153 /*
2154 * M_NOVM means don't ask at all!
2155 */
2156 if (flags & M_NOVM)
2157 break;
2158
2159 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2160 keg->uk_flags |= UMA_ZFLAG_FULL;
2161 /*
2162 * If this is not a multi-zone, set the FULL bit.
2163 * Otherwise slab_multi() takes care of it.
2164 */
2165 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
2166 zone->uz_flags |= UMA_ZFLAG_FULL;
2167 if (flags & M_NOWAIT)
2168 break;
2169 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2170 continue;
2171 }
2172 keg->uk_recurse++;
2173 slab = keg_alloc_slab(keg, zone, flags);
2174 keg->uk_recurse--;
2175 /*
2176 * If we got a slab here it's safe to mark it partially used
2177 * and return. We assume that the caller is going to remove
2178 * at least one item.
2179 */
2180 if (slab) {
2181 MPASS(slab->us_keg == keg);
2182 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2183 return (slab);
2184 }
2185 /*
2186 * We might not have been able to get a slab but another cpu
2187 * could have while we were unlocked. Check again before we
2188 * fail.
2189 */
2190 flags |= M_NOVM;
2191 }
2192 return (slab);
2193}
2194
2195static inline void
2196zone_relock(uma_zone_t zone, uma_keg_t keg)
2197{
2198 if (zone->uz_lock != &keg->uk_lock) {
2199 KEG_UNLOCK(keg);
2200 ZONE_LOCK(zone);
2201 }
2202}
2203
2204static inline void
2205keg_relock(uma_keg_t keg, uma_zone_t zone)
2206{
2207 if (zone->uz_lock != &keg->uk_lock) {
2208 ZONE_UNLOCK(zone);
2209 KEG_LOCK(keg);
2210 }
2211}
2212
2213static uma_slab_t
2214zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2215{
2216 uma_slab_t slab;
2217
2218 if (keg == NULL)
2219 keg = zone_first_keg(zone);
2220 /*
2221 * This is to prevent us from recursively trying to allocate
2222 * buckets. The problem is that if an allocation forces us to
2223 * grab a new bucket we will call page_alloc, which will go off
2224 * and cause the vm to allocate vm_map_entries. If we need new
2225 * buckets there too we will recurse in kmem_alloc and bad
2226 * things happen. So instead we return a NULL bucket, and make
2227	 * the code that allocates buckets smart enough to deal with it.
2228 */
2229 if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
2230 return (NULL);
2231
2232 for (;;) {
2233 slab = keg_fetch_slab(keg, zone, flags);
2234 if (slab)
2235 return (slab);
2236 if (flags & (M_NOWAIT | M_NOVM))
2237 break;
2238 }
2239 return (NULL);
2240}
2241
2242/*
2243 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2244 * with the keg locked. Caller must call zone_relock() afterwards if the
2245 * zone lock is required. On NULL the zone lock is held.
2246 *
2247 * The last pointer is used to seed the search. It is not required.
2248 */
2249static uma_slab_t
2250zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2251{
2252 uma_klink_t klink;
2253 uma_slab_t slab;
2254 uma_keg_t keg;
2255 int flags;
2256 int empty;
2257 int full;
2258
2259 /*
2260 * Don't wait on the first pass. This will skip limit tests
2261 * as well. We don't want to block if we can find a provider
2262 * without blocking.
2263 */
2264 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2265 /*
2266 * Use the last slab allocated as a hint for where to start
2267 * the search.
2268 */
2269 if (last) {
2270 slab = keg_fetch_slab(last, zone, flags);
2271 if (slab)
2272 return (slab);
2273 zone_relock(zone, last);
2274 last = NULL;
2275 }
2276 /*
2277	 * Loop until we have a slab in case of transient failures
2278 * while M_WAITOK is specified. I'm not sure this is 100%
2279 * required but we've done it for so long now.
2280 */
2281 for (;;) {
2282 empty = 0;
2283 full = 0;
2284 /*
2285 * Search the available kegs for slabs. Be careful to hold the
2286 * correct lock while calling into the keg layer.
2287 */
2288 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2289 keg = klink->kl_keg;
2290 keg_relock(keg, zone);
2291 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2292 slab = keg_fetch_slab(keg, zone, flags);
2293 if (slab)
2294 return (slab);
2295 }
2296 if (keg->uk_flags & UMA_ZFLAG_FULL)
2297 full++;
2298 else
2299 empty++;
2300 zone_relock(zone, keg);
2301 }
2302 if (rflags & (M_NOWAIT | M_NOVM))
2303 break;
2304 flags = rflags;
2305 /*
2306 * All kegs are full. XXX We can't atomically check all kegs
2307 * and sleep so just sleep for a short period and retry.
2308 */
2309 if (full && !empty) {
2310 zone->uz_flags |= UMA_ZFLAG_FULL;
2311 zone->uz_sleeps++;
2312 msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
2313 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2314 continue;
2315 }
2316 }
2317 return (NULL);
2318}
2319
2320static void *
2321slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
2322{
2323 uma_keg_t keg;
2324 uma_slabrefcnt_t slabref;
2325 void *item;
2326 u_int8_t freei;
2327
2328 keg = slab->us_keg;
2329 mtx_assert(&keg->uk_lock, MA_OWNED);
2330
2331 freei = slab->us_firstfree;
2332 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2333 slabref = (uma_slabrefcnt_t)slab;
2334 slab->us_firstfree = slabref->us_freelist[freei].us_item;
2335 } else {
2336 slab->us_firstfree = slab->us_freelist[freei].us_item;
2337 }
2338 item = slab->us_data + (keg->uk_rsize * freei);
2339
2340 slab->us_freecount--;
2341 keg->uk_free--;
2342#ifdef INVARIANTS
2343 uma_dbg_alloc(zone, slab, item);
2344#endif
2345 /* Move this slab to the full list */
2346 if (slab->us_freecount == 0) {
2347 LIST_REMOVE(slab, us_link);
2348 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2349 }
2350
2351 return (item);
2352}
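/*
 * Illustrative sketch, not part of this file: the index-to-address mapping
 * used above, for a hypothetical keg whose uk_rsize is 128 bytes.  Free
 * index 3 selects the item at us_data + 128 * 3 = us_data + 384, and
 * zone_free_item() later recovers the index as (item - us_data) / 128 = 3.
 */
#if 0
	freei = 3;
	item = slab->us_data + (keg->uk_rsize * freei);	/* us_data + 384 */
#endif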
2353
2354static int
2355zone_alloc_bucket(uma_zone_t zone, int flags)
2356{
2357 uma_bucket_t bucket;
2358 uma_slab_t slab;
2359 uma_keg_t keg;
2360 int16_t saved;
2361 int max, origflags = flags;
2362
2363 /*
2364 * Try this zone's free list first so we don't allocate extra buckets.
2365 */
2366 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2367 KASSERT(bucket->ub_cnt == 0,
2368 ("zone_alloc_bucket: Bucket on free list is not empty."));
2369 LIST_REMOVE(bucket, ub_link);
2370 } else {
2371 int bflags;
2372
2373 bflags = (flags & ~M_ZERO);
2374 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2375 bflags |= M_NOVM;
2376
2377 ZONE_UNLOCK(zone);
2378 bucket = bucket_alloc(zone->uz_count, bflags);
2379 ZONE_LOCK(zone);
2380 }
2381
2382 if (bucket == NULL) {
2383 return (0);
2384 }
2385
2386#ifdef SMP
2387 /*
2388 * This code is here to limit the number of simultaneous bucket fills
2389	 * for any given zone to the number of per-CPU caches in this zone. This
2390 * is done so that we don't allocate more memory than we really need.
2391 */
2392 if (zone->uz_fills >= mp_ncpus)
2393 goto done;
2394
2395#endif
2396 zone->uz_fills++;
2397
2398 max = MIN(bucket->ub_entries, zone->uz_count);
2399 /* Try to keep the buckets totally full */
2400 saved = bucket->ub_cnt;
2401 slab = NULL;
2402 keg = NULL;
2403 while (bucket->ub_cnt < max &&
2404 (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
2405 keg = slab->us_keg;
2406 while (slab->us_freecount && bucket->ub_cnt < max) {
2407 bucket->ub_bucket[bucket->ub_cnt++] =
2408 slab_alloc_item(zone, slab);
2409 }
2410
2411 /* Don't block on the next fill */
2412 flags |= M_NOWAIT;
2413 }
2414 if (slab)
2415 zone_relock(zone, keg);
2416
2417 /*
2418 * We unlock here because we need to call the zone's init.
2419 * It should be safe to unlock because the slab dealt with
2420 * above is already on the appropriate list within the keg
2421 * and the bucket we filled is not yet on any list, so we
2422 * own it.
2423 */
2424 if (zone->uz_init != NULL) {
2425 int i;
2426
2427 ZONE_UNLOCK(zone);
2428 for (i = saved; i < bucket->ub_cnt; i++)
2429 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2430 origflags) != 0)
2431 break;
2432 /*
2433 * If we couldn't initialize the whole bucket, put the
2434 * rest back onto the freelist.
2435 */
2436 if (i != bucket->ub_cnt) {
2437 int j;
2438
2439 for (j = i; j < bucket->ub_cnt; j++) {
2440 zone_free_item(zone, bucket->ub_bucket[j],
2441 NULL, SKIP_FINI, 0);
2442#ifdef INVARIANTS
2443 bucket->ub_bucket[j] = NULL;
2444#endif
2445 }
2446 bucket->ub_cnt = i;
2447 }
2448 ZONE_LOCK(zone);
2449 }
2450
2451 zone->uz_fills--;
2452 if (bucket->ub_cnt != 0) {
2453 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2454 bucket, ub_link);
2455 return (1);
2456 }
2457#ifdef SMP
2458done:
2459#endif
2460 bucket_free(bucket);
2461
2462 return (0);
2463}
2464/*
2465 * Allocates an item for an internal zone
2466 *
2467 * Arguments
2468 * zone The zone to alloc for.
2469 * udata The data to be passed to the constructor.
2470 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2471 *
2472 * Returns
2473 * NULL if there is no memory and M_NOWAIT is set
2474 * An item if successful
2475 */
2476
2477static void *
2478zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2479{
2480 uma_slab_t slab;
2481 void *item;
2482
2483 item = NULL;
2484
2485#ifdef UMA_DEBUG_ALLOC
2486 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2487#endif
2488 ZONE_LOCK(zone);
2489
2490 slab = zone->uz_slab(zone, NULL, flags);
2491 if (slab == NULL) {
2492 zone->uz_fails++;
2493 ZONE_UNLOCK(zone);
2494 return (NULL);
2495 }
2496
2497 item = slab_alloc_item(zone, slab);
2498
2499 zone_relock(zone, slab->us_keg);
2500 zone->uz_allocs++;
2501 ZONE_UNLOCK(zone);
2502
2503 /*
2504 * We have to call both the zone's init (not the keg's init)
2505 * and the zone's ctor. This is because the item is going from
2506 * a keg slab directly to the user, and the user is expecting it
2507 * to be both zone-init'd as well as zone-ctor'd.
2508 */
2509 if (zone->uz_init != NULL) {
2510 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2511 zone_free_item(zone, item, udata, SKIP_FINI,
2512 ZFREE_STATFAIL | ZFREE_STATFREE);
2513 return (NULL);
2514 }
2515 }
2516 if (zone->uz_ctor != NULL) {
2517 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2518 zone_free_item(zone, item, udata, SKIP_DTOR,
2519 ZFREE_STATFAIL | ZFREE_STATFREE);
2520 return (NULL);
2521 }
2522 }
2523 if (flags & M_ZERO)
2524 bzero(item, zone->uz_size);
2525
2526 return (item);
2527}
2528
2529/* See uma.h */
2530void
2531uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2532{
2533 uma_cache_t cache;
2534 uma_bucket_t bucket;
2535 int bflags;
2536 int cpu;
2537
2538#ifdef UMA_DEBUG_ALLOC_1
2539 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2540#endif
2541 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2542 zone->uz_name);
2543
2544 /* uma_zfree(..., NULL) does nothing, to match free(9). */
2545 if (item == NULL)
2546 return;
2547
2574#ifdef DEBUG_MEMGUARD
2575 if (is_memguard_addr(item)) {
2576 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2577 zone->uz_dtor(item, zone->uz_size, udata);
2578 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2579 zone->uz_fini(item, zone->uz_size);
2580 memguard_free(item);
2581 return;
2582 }
2583#endif
2548 if (zone->uz_dtor)
2549 zone->uz_dtor(item, zone->uz_size, udata);
2550
2551#ifdef INVARIANTS
2552 ZONE_LOCK(zone);
2553 if (zone->uz_flags & UMA_ZONE_MALLOC)
2554 uma_dbg_free(zone, udata, item);
2555 else
2556 uma_dbg_free(zone, NULL, item);
2557 ZONE_UNLOCK(zone);
2558#endif
2559 /*
2560 * The race here is acceptable. If we miss it we'll just have to wait
2561 * a little longer for the limits to be reset.
2562 */
2563 if (zone->uz_flags & UMA_ZFLAG_FULL)
2564 goto zfree_internal;
2565
2566 /*
2567 * If possible, free to the per-CPU cache. There are two
2568 * requirements for safe access to the per-CPU cache: (1) the thread
2569 * accessing the cache must not be preempted or yield during access,
2570 * and (2) the thread must not migrate CPUs without switching which
2571 * cache it accesses. We rely on a critical section to prevent
2572 * preemption and migration. We release the critical section in
2573 * order to acquire the zone mutex if we are unable to free to the
2574 * current cache; when we re-acquire the critical section, we must
2575 * detect and handle migration if it has occurred.
2576 */
2577zfree_restart:
2578 critical_enter();
2579 cpu = curcpu;
2580 cache = &zone->uz_cpu[cpu];
2581
2582zfree_start:
2583 bucket = cache->uc_freebucket;
2584
2585 if (bucket) {
2586 /*
2587 * Do we have room in our bucket? It is OK for this uz count
2588 * check to be slightly out of sync.
2589 */
2590
2591 if (bucket->ub_cnt < bucket->ub_entries) {
2592 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2593 ("uma_zfree: Freeing to non free bucket index."));
2594 bucket->ub_bucket[bucket->ub_cnt] = item;
2595 bucket->ub_cnt++;
2596 cache->uc_frees++;
2597 critical_exit();
2598 return;
2599 } else if (cache->uc_allocbucket) {
2600#ifdef UMA_DEBUG_ALLOC
2601 printf("uma_zfree: Swapping buckets.\n");
2602#endif
2603 /*
2604 * We have run out of space in our freebucket.
2605 * See if we can switch with our alloc bucket.
2606 */
2607 if (cache->uc_allocbucket->ub_cnt <
2608 cache->uc_freebucket->ub_cnt) {
2609 bucket = cache->uc_freebucket;
2610 cache->uc_freebucket = cache->uc_allocbucket;
2611 cache->uc_allocbucket = bucket;
2612 goto zfree_start;
2613 }
2614 }
2615 }
2616 /*
2617 * We can get here for two reasons:
2618 *
2619 * 1) The buckets are NULL
2620 * 2) The alloc and free buckets are both somewhat full.
2621 *
2622	 * We must go back to the zone, which requires acquiring the zone lock,
2623 * which in turn means we must release and re-acquire the critical
2624 * section. Since the critical section is released, we may be
2625 * preempted or migrate. As such, make sure not to maintain any
2626 * thread-local state specific to the cache from prior to releasing
2627 * the critical section.
2628 */
2629 critical_exit();
2630 ZONE_LOCK(zone);
2631 critical_enter();
2632 cpu = curcpu;
2633 cache = &zone->uz_cpu[cpu];
2634 if (cache->uc_freebucket != NULL) {
2635 if (cache->uc_freebucket->ub_cnt <
2636 cache->uc_freebucket->ub_entries) {
2637 ZONE_UNLOCK(zone);
2638 goto zfree_start;
2639 }
2640 if (cache->uc_allocbucket != NULL &&
2641 (cache->uc_allocbucket->ub_cnt <
2642 cache->uc_freebucket->ub_cnt)) {
2643 ZONE_UNLOCK(zone);
2644 goto zfree_start;
2645 }
2646 }
2647
2648 /* Since we have locked the zone we may as well send back our stats */
2649 zone->uz_allocs += cache->uc_allocs;
2650 cache->uc_allocs = 0;
2651 zone->uz_frees += cache->uc_frees;
2652 cache->uc_frees = 0;
2653
2654 bucket = cache->uc_freebucket;
2655 cache->uc_freebucket = NULL;
2656
2657 /* Can we throw this on the zone full list? */
2658 if (bucket != NULL) {
2659#ifdef UMA_DEBUG_ALLOC
2660 printf("uma_zfree: Putting old bucket on the free list.\n");
2661#endif
2662 /* ub_cnt is pointing to the last free item */
2663 KASSERT(bucket->ub_cnt != 0,
2664 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2665 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2666 bucket, ub_link);
2667 }
2668 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2669 LIST_REMOVE(bucket, ub_link);
2670 ZONE_UNLOCK(zone);
2671 cache->uc_freebucket = bucket;
2672 goto zfree_start;
2673 }
2674 /* We are no longer associated with this CPU. */
2675 critical_exit();
2676
2677 /* And the zone.. */
2678 ZONE_UNLOCK(zone);
2679
2680#ifdef UMA_DEBUG_ALLOC
2681 printf("uma_zfree: Allocating new free bucket.\n");
2682#endif
2683 bflags = M_NOWAIT;
2684
2685 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2686 bflags |= M_NOVM;
2687 bucket = bucket_alloc(zone->uz_count, bflags);
2688 if (bucket) {
2689 ZONE_LOCK(zone);
2690 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2691 bucket, ub_link);
2692 ZONE_UNLOCK(zone);
2693 goto zfree_restart;
2694 }
2695
2696 /*
2697 * If nothing else caught this, we'll just do an internal free.
2698 */
2699zfree_internal:
2700 zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2701
2702 return;
2703}
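/*
 * Illustrative sketch, not part of this file: a typical consumer of the
 * fast paths above, using the uma_zalloc()/uma_zfree() wrappers from
 * uma.h.  "example_zone" is hypothetical.
 */
#if 0
	item = uma_zalloc(example_zone, M_NOWAIT | M_ZERO);
	if (item == NULL)
		return (ENOMEM);	/* M_NOWAIT allocations may fail */
	/* ... use item ... */
	uma_zfree(example_zone, item);	/* uma_zfree(zone, NULL) is a no-op */
#endif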
2704
2705/*
2706 * Frees an item to an INTERNAL zone or allocates a free bucket
2707 *
2708 * Arguments:
2709 * zone The zone to free to
2710 * item The item we're freeing
2711 * udata User supplied data for the dtor
2712 * skip Skip dtors and finis
2713 */
2714static void
2715zone_free_item(uma_zone_t zone, void *item, void *udata,
2716 enum zfreeskip skip, int flags)
2717{
2718 uma_slab_t slab;
2719 uma_slabrefcnt_t slabref;
2720 uma_keg_t keg;
2721 u_int8_t *mem;
2722 u_int8_t freei;
2723 int clearfull;
2724
2725 if (skip < SKIP_DTOR && zone->uz_dtor)
2726 zone->uz_dtor(item, zone->uz_size, udata);
2727
2728 if (skip < SKIP_FINI && zone->uz_fini)
2729 zone->uz_fini(item, zone->uz_size);
2730
2731 ZONE_LOCK(zone);
2732
2733 if (flags & ZFREE_STATFAIL)
2734 zone->uz_fails++;
2735 if (flags & ZFREE_STATFREE)
2736 zone->uz_frees++;
2737
2738 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2739 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2740 keg = zone_first_keg(zone); /* Must only be one. */
2741 if (zone->uz_flags & UMA_ZONE_HASH) {
2742 slab = hash_sfind(&keg->uk_hash, mem);
2743 } else {
2744 mem += keg->uk_pgoff;
2745 slab = (uma_slab_t)mem;
2746 }
2747 } else {
2748 /* This prevents redundant lookups via free(). */
2749 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
2750 slab = (uma_slab_t)udata;
2751 else
2752 slab = vtoslab((vm_offset_t)item);
2753 keg = slab->us_keg;
2754 keg_relock(keg, zone);
2755 }
2756 MPASS(keg == slab->us_keg);
2757
2758 /* Do we need to remove from any lists? */
2759 if (slab->us_freecount+1 == keg->uk_ipers) {
2760 LIST_REMOVE(slab, us_link);
2761 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2762 } else if (slab->us_freecount == 0) {
2763 LIST_REMOVE(slab, us_link);
2764 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2765 }
2766
2767 /* Slab management stuff */
2768 freei = ((unsigned long)item - (unsigned long)slab->us_data)
2769 / keg->uk_rsize;
2770
2771#ifdef INVARIANTS
2772 if (!skip)
2773 uma_dbg_free(zone, slab, item);
2774#endif
2775
2776 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2777 slabref = (uma_slabrefcnt_t)slab;
2778 slabref->us_freelist[freei].us_item = slab->us_firstfree;
2779 } else {
2780 slab->us_freelist[freei].us_item = slab->us_firstfree;
2781 }
2782 slab->us_firstfree = freei;
2783 slab->us_freecount++;
2784
2785 /* Zone statistics */
2786 keg->uk_free++;
2787
2788 clearfull = 0;
2789 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2790 if (keg->uk_pages < keg->uk_maxpages) {
2791 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2792 clearfull = 1;
2793 }
2794
2795 /*
2796 * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
2797 * wake up all procs blocked on pages. This should be uncommon, so
2798 * keeping this simple for now (rather than adding count of blocked
2799 * threads etc).
2800 */
2801 wakeup(keg);
2802 }
2803 if (clearfull) {
2804 zone_relock(zone, keg);
2805 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2806 wakeup(zone);
2807 ZONE_UNLOCK(zone);
2808 } else
2809 KEG_UNLOCK(keg);
2810}
2811
2812/* See uma.h */
2813int
2814uma_zone_set_max(uma_zone_t zone, int nitems)
2815{
2816 uma_keg_t keg;
2817
2818 ZONE_LOCK(zone);
2819 keg = zone_first_keg(zone);
2820 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2821 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2822 keg->uk_maxpages += keg->uk_ppera;
2823 nitems = keg->uk_maxpages * keg->uk_ipers;
2824 ZONE_UNLOCK(zone);
2825
2826 return (nitems);
2827}
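/*
 * Illustrative sketch, not part of this file: the rounding performed above,
 * worked through for a hypothetical keg with uk_ipers = 10 and uk_ppera = 1.
 * Requesting 25 items gives uk_maxpages = (25 / 10) * 1 = 2; since
 * 2 * 10 = 20 < 25, another uk_ppera is added, leaving uk_maxpages = 3 and
 * an effective limit of 3 * 10 = 30 items, which is what is returned.
 */
#if 0
	printf("limit: %d\n", uma_zone_set_max(example_zone, 25));	/* prints 30 */
#endif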
2828
2829/* See uma.h */
2830int
2831uma_zone_get_max(uma_zone_t zone)
2832{
2833 int nitems;
2834 uma_keg_t keg;
2835
2836 ZONE_LOCK(zone);
2837 keg = zone_first_keg(zone);
2838 nitems = keg->uk_maxpages * keg->uk_ipers;
2839 ZONE_UNLOCK(zone);
2840
2841 return (nitems);
2842}
2843
2844/* See uma.h */
2845int
2846uma_zone_get_cur(uma_zone_t zone)
2847{
2848 int64_t nitems;
2849 u_int i;
2850
2851 ZONE_LOCK(zone);
2852 nitems = zone->uz_allocs - zone->uz_frees;
2853 CPU_FOREACH(i) {
2854 /*
2855 * See the comment in sysctl_vm_zone_stats() regarding the
2856 * safety of accessing the per-cpu caches. With the zone lock
2857 * held, it is safe, but can potentially result in stale data.
2858 */
2859 nitems += zone->uz_cpu[i].uc_allocs -
2860 zone->uz_cpu[i].uc_frees;
2861 }
2862 ZONE_UNLOCK(zone);
2863
2864 return (nitems < 0 ? 0 : nitems);
2865}
2866
2867/* See uma.h */
2868void
2869uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2870{
2871 uma_keg_t keg;
2872
2873 ZONE_LOCK(zone);
2874 keg = zone_first_keg(zone);
2875 KASSERT(keg->uk_pages == 0,
2876 ("uma_zone_set_init on non-empty keg"));
2877 keg->uk_init = uminit;
2878 ZONE_UNLOCK(zone);
2879}
2880
2881/* See uma.h */
2882void
2883uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2884{
2885 uma_keg_t keg;
2886
2887 ZONE_LOCK(zone);
2888 keg = zone_first_keg(zone);
2889 KASSERT(keg->uk_pages == 0,
2890 ("uma_zone_set_fini on non-empty keg"));
2891 keg->uk_fini = fini;
2892 ZONE_UNLOCK(zone);
2893}
2894
2895/* See uma.h */
2896void
2897uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2898{
2899 ZONE_LOCK(zone);
2900 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2901 ("uma_zone_set_zinit on non-empty keg"));
2902 zone->uz_init = zinit;
2903 ZONE_UNLOCK(zone);
2904}
2905
2906/* See uma.h */
2907void
2908uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2909{
2910 ZONE_LOCK(zone);
2911 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2912 ("uma_zone_set_zfini on non-empty keg"));
2913 zone->uz_fini = zfini;
2914 ZONE_UNLOCK(zone);
2915}
2916
2917/* See uma.h */
2918/* XXX uk_freef is not actually used with the zone locked */
2919void
2920uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2921{
2922
2923 ZONE_LOCK(zone);
2924 zone_first_keg(zone)->uk_freef = freef;
2925 ZONE_UNLOCK(zone);
2926}
2927
2928/* See uma.h */
2929/* XXX uk_allocf is not actually used with the zone locked */
2930void
2931uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2932{
2933 uma_keg_t keg;
2934
2935 ZONE_LOCK(zone);
2936 keg = zone_first_keg(zone);
2937 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2938 keg->uk_allocf = allocf;
2939 ZONE_UNLOCK(zone);
2940}
2941
2942/* See uma.h */
2943int
2944uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2945{
2946 uma_keg_t keg;
2947 vm_offset_t kva;
2948 int pages;
2949
2950 keg = zone_first_keg(zone);
2951 pages = count / keg->uk_ipers;
2952
2953 if (pages * keg->uk_ipers < count)
2954 pages++;
2955
2956 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2957
2958 if (kva == 0)
2959 return (0);
2960 if (obj == NULL)
2961 obj = vm_object_allocate(OBJT_PHYS, pages);
2962 else {
2963 VM_OBJECT_LOCK_INIT(obj, "uma object");
2964 _vm_object_allocate(OBJT_PHYS, pages, obj);
2965 }
2966 ZONE_LOCK(zone);
2967 keg->uk_kva = kva;
2968 keg->uk_obj = obj;
2969 keg->uk_maxpages = pages;
2970 keg->uk_allocf = obj_alloc;
2971 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2972 ZONE_UNLOCK(zone);
2973 return (1);
2974}
2975
2976/* See uma.h */
2977void
2978uma_prealloc(uma_zone_t zone, int items)
2979{
2980 int slabs;
2981 uma_slab_t slab;
2982 uma_keg_t keg;
2983
2984 keg = zone_first_keg(zone);
2985 ZONE_LOCK(zone);
2986 slabs = items / keg->uk_ipers;
2987 if (slabs * keg->uk_ipers < items)
2988 slabs++;
2989 while (slabs > 0) {
2990 slab = keg_alloc_slab(keg, zone, M_WAITOK);
2991 if (slab == NULL)
2992 break;
2993 MPASS(slab->us_keg == keg);
2994 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2995 slabs--;
2996 }
2997 ZONE_UNLOCK(zone);
2998}
2999
3000/* See uma.h */
3001u_int32_t *
3002uma_find_refcnt(uma_zone_t zone, void *item)
3003{
3004 uma_slabrefcnt_t slabref;
3005 uma_keg_t keg;
3006 u_int32_t *refcnt;
3007 int idx;
3008
3009 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
3010 (~UMA_SLAB_MASK));
3011 keg = slabref->us_keg;
3012 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
3013 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3014 idx = ((unsigned long)item - (unsigned long)slabref->us_data)
3015 / keg->uk_rsize;
3016 refcnt = &slabref->us_freelist[idx].us_refcnt;
3017 return refcnt;
3018}
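/*
 * Illustrative sketch, not part of this file: how a UMA_ZONE_REFCNT
 * consumer might use the per-item reference count found above.  The zone
 * and item are hypothetical, and callers are expected to serialize or use
 * atomics when manipulating the count.
 */
#if 0
	refcnt = uma_find_refcnt(refcnt_zone, item);
	*refcnt = 1;			/* take the initial reference */
	/* ... hand out additional references ... */
	if (--(*refcnt) == 0)
		uma_zfree(refcnt_zone, item);
#endif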
3019
3020/* See uma.h */
3021void
3022uma_reclaim(void)
3023{
3024#ifdef UMA_DEBUG
3025 printf("UMA: vm asked us to release pages!\n");
3026#endif
3027 bucket_enable();
3028 zone_foreach(zone_drain);
3029 /*
3030	 * Some slabs may have been freed, but this zone will have been visited
3031	 * early, so we drain it again here to free pages that only became empty
3032	 * once the other zones were drained.  We have to do the same for buckets.
3033 */
3034 zone_drain(slabzone);
3035 zone_drain(slabrefzone);
3036 bucket_zone_drain();
3037}
3038
3039/* See uma.h */
3040int
3041uma_zone_exhausted(uma_zone_t zone)
3042{
3043 int full;
3044
3045 ZONE_LOCK(zone);
3046 full = (zone->uz_flags & UMA_ZFLAG_FULL);
3047 ZONE_UNLOCK(zone);
3048 return (full);
3049}
3050
3051int
3052uma_zone_exhausted_nolock(uma_zone_t zone)
3053{
3054 return (zone->uz_flags & UMA_ZFLAG_FULL);
3055}
3056
3057void *
3058uma_large_malloc(int size, int wait)
3059{
3060 void *mem;
3061 uma_slab_t slab;
3062 u_int8_t flags;
3063
3064 slab = zone_alloc_item(slabzone, NULL, wait);
3065 if (slab == NULL)
3066 return (NULL);
3067 mem = page_alloc(NULL, size, &flags, wait);
3068 if (mem) {
3069 vsetslab((vm_offset_t)mem, slab);
3070 slab->us_data = mem;
3071 slab->us_flags = flags | UMA_SLAB_MALLOC;
3072 slab->us_size = size;
3073 } else {
3074 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
3075 ZFREE_STATFAIL | ZFREE_STATFREE);
3076 }
3077
3078 return (mem);
3079}
3080
3081void
3082uma_large_free(uma_slab_t slab)
3083{
3084 vsetobj((vm_offset_t)slab->us_data, kmem_object);
3085 page_free(slab->us_data, slab->us_size, slab->us_flags);
3086 zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
3087}
3088
3089void
3090uma_print_stats(void)
3091{
3092 zone_foreach(uma_print_zone);
3093}
3094
3095static void
3096slab_print(uma_slab_t slab)
3097{
3098 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
3099 slab->us_keg, slab->us_data, slab->us_freecount,
3100 slab->us_firstfree);
3101}
3102
3103static void
3104cache_print(uma_cache_t cache)
3105{
3106 printf("alloc: %p(%d), free: %p(%d)\n",
3107 cache->uc_allocbucket,
3108 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3109 cache->uc_freebucket,
3110 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3111}
3112
3113static void
3114uma_print_keg(uma_keg_t keg)
3115{
3116 uma_slab_t slab;
3117
3118 printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
3119 "out %d free %d limit %d\n",
3120 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3121 keg->uk_ipers, keg->uk_ppera,
3122 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3123 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3124 printf("Part slabs:\n");
3125 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3126 slab_print(slab);
3127 printf("Free slabs:\n");
3128 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3129 slab_print(slab);
3130 printf("Full slabs:\n");
3131 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3132 slab_print(slab);
3133}
3134
3135void
3136uma_print_zone(uma_zone_t zone)
3137{
3138 uma_cache_t cache;
3139 uma_klink_t kl;
3140 int i;
3141
3142 printf("zone: %s(%p) size %d flags %d\n",
3143 zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3144 LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3145 uma_print_keg(kl->kl_keg);
3146 CPU_FOREACH(i) {
3147 cache = &zone->uz_cpu[i];
3148 printf("CPU %d Cache:\n", i);
3149 cache_print(cache);
3150 }
3151}
3152
3153#ifdef DDB
3154/*
3155 * Generate statistics across both the zone and its per-CPU caches. Return
3156 * desired statistics if the pointer is non-NULL for that statistic.
3157 *
3158 * Note: does not update the zone statistics, as it can't safely clear the
3159 * per-CPU cache statistic.
3160 *
3161 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3162 * safe from off-CPU; we should modify the caches to track this information
3163 * directly so that we don't have to.
3164 */
3165static void
3166uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
3167 u_int64_t *freesp, u_int64_t *sleepsp)
3168{
3169 uma_cache_t cache;
3170 u_int64_t allocs, frees, sleeps;
3171 int cachefree, cpu;
3172
3173 allocs = frees = sleeps = 0;
3174 cachefree = 0;
3175 CPU_FOREACH(cpu) {
3176 cache = &z->uz_cpu[cpu];
3177 if (cache->uc_allocbucket != NULL)
3178 cachefree += cache->uc_allocbucket->ub_cnt;
3179 if (cache->uc_freebucket != NULL)
3180 cachefree += cache->uc_freebucket->ub_cnt;
3181 allocs += cache->uc_allocs;
3182 frees += cache->uc_frees;
3183 }
3184 allocs += z->uz_allocs;
3185 frees += z->uz_frees;
3186 sleeps += z->uz_sleeps;
3187 if (cachefreep != NULL)
3188 *cachefreep = cachefree;
3189 if (allocsp != NULL)
3190 *allocsp = allocs;
3191 if (freesp != NULL)
3192 *freesp = frees;
3193 if (sleepsp != NULL)
3194 *sleepsp = sleeps;
3195}
3196#endif /* DDB */
3197
3198static int
3199sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3200{
3201 uma_keg_t kz;
3202 uma_zone_t z;
3203 int count;
3204
3205 count = 0;
3206 mtx_lock(&uma_mtx);
3207 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3208 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3209 count++;
3210 }
3211 mtx_unlock(&uma_mtx);
3212 return (sysctl_handle_int(oidp, &count, 0, req));
3213}
3214
3215static int
3216sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3217{
3218 struct uma_stream_header ush;
3219 struct uma_type_header uth;
3220 struct uma_percpu_stat ups;
3221 uma_bucket_t bucket;
3222 struct sbuf sbuf;
3223 uma_cache_t cache;
3224 uma_klink_t kl;
3225 uma_keg_t kz;
3226 uma_zone_t z;
3227 uma_keg_t k;
3228 int count, error, i;
3229
3230 error = sysctl_wire_old_buffer(req, 0);
3231 if (error != 0)
3232 return (error);
3233 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3234
3235 count = 0;
3236 mtx_lock(&uma_mtx);
3237 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3238 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3239 count++;
3240 }
3241
3242 /*
3243 * Insert stream header.
3244 */
3245 bzero(&ush, sizeof(ush));
3246 ush.ush_version = UMA_STREAM_VERSION;
3247 ush.ush_maxcpus = (mp_maxid + 1);
3248 ush.ush_count = count;
3249 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3250
3251 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3252 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3253 bzero(&uth, sizeof(uth));
3254 ZONE_LOCK(z);
3255 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3256 uth.uth_align = kz->uk_align;
3257 uth.uth_size = kz->uk_size;
3258 uth.uth_rsize = kz->uk_rsize;
3259 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3260 k = kl->kl_keg;
3261 uth.uth_maxpages += k->uk_maxpages;
3262 uth.uth_pages += k->uk_pages;
3263 uth.uth_keg_free += k->uk_free;
3264 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3265 * k->uk_ipers;
3266 }
3267
3268 /*
3269			 * A zone is secondary if it is not the first entry
3270 * on the keg's zone list.
3271 */
3272 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3273 (LIST_FIRST(&kz->uk_zones) != z))
3274 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3275
3276 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3277 uth.uth_zone_free += bucket->ub_cnt;
3278 uth.uth_allocs = z->uz_allocs;
3279 uth.uth_frees = z->uz_frees;
3280 uth.uth_fails = z->uz_fails;
3281 uth.uth_sleeps = z->uz_sleeps;
3282 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3283 /*
3284 * While it is not normally safe to access the cache
3285 * bucket pointers while not on the CPU that owns the
3286 * cache, we only allow the pointers to be exchanged
3287 * without the zone lock held, not invalidated, so
3288 * accept the possible race associated with bucket
3289 * exchange during monitoring.
3290 */
3291 for (i = 0; i < (mp_maxid + 1); i++) {
3292 bzero(&ups, sizeof(ups));
3293 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3294 goto skip;
3295 if (CPU_ABSENT(i))
3296 goto skip;
3297 cache = &z->uz_cpu[i];
3298 if (cache->uc_allocbucket != NULL)
3299 ups.ups_cache_free +=
3300 cache->uc_allocbucket->ub_cnt;
3301 if (cache->uc_freebucket != NULL)
3302 ups.ups_cache_free +=
3303 cache->uc_freebucket->ub_cnt;
3304 ups.ups_allocs = cache->uc_allocs;
3305 ups.ups_frees = cache->uc_frees;
3306skip:
3307 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3308 }
3309 ZONE_UNLOCK(z);
3310 }
3311 }
3312 mtx_unlock(&uma_mtx);
3313 error = sbuf_finish(&sbuf);
3314 sbuf_delete(&sbuf);
3315 return (error);
3316}
3317
3318#ifdef DDB
3319DB_SHOW_COMMAND(uma, db_show_uma)
3320{
3321 u_int64_t allocs, frees, sleeps;
3322 uma_bucket_t bucket;
3323 uma_keg_t kz;
3324 uma_zone_t z;
3325 int cachefree;
3326
3327 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3328 "Requests", "Sleeps");
3329 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3330 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3331 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3332 allocs = z->uz_allocs;
3333 frees = z->uz_frees;
3334 sleeps = z->uz_sleeps;
3335 cachefree = 0;
3336 } else
3337 uma_zone_sumstat(z, &cachefree, &allocs,
3338 &frees, &sleeps);
3339 if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3340 (LIST_FIRST(&kz->uk_zones) != z)))
3341 cachefree += kz->uk_free;
3342 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3343 cachefree += bucket->ub_cnt;
3344 db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3345 (uintmax_t)kz->uk_size,
3346 (intmax_t)(allocs - frees), cachefree,
3347 (uintmax_t)allocs, sleeps);
3348 }
3349 }
3350}
3351#endif
2584 if (zone->uz_dtor)
2585 zone->uz_dtor(item, zone->uz_size, udata);
2586
2587#ifdef INVARIANTS
2588 ZONE_LOCK(zone);
2589 if (zone->uz_flags & UMA_ZONE_MALLOC)
2590 uma_dbg_free(zone, udata, item);
2591 else
2592 uma_dbg_free(zone, NULL, item);
2593 ZONE_UNLOCK(zone);
2594#endif
2595 /*
2596 * The race here is acceptable. If we miss it we'll just have to wait
2597 * a little longer for the limits to be reset.
2598 */
2599 if (zone->uz_flags & UMA_ZFLAG_FULL)
2600 goto zfree_internal;
2601
2602 /*
2603 * If possible, free to the per-CPU cache. There are two
2604 * requirements for safe access to the per-CPU cache: (1) the thread
2605 * accessing the cache must not be preempted or yield during access,
2606 * and (2) the thread must not migrate CPUs without switching which
2607 * cache it accesses. We rely on a critical section to prevent
2608 * preemption and migration. We release the critical section in
2609 * order to acquire the zone mutex if we are unable to free to the
2610 * current cache; when we re-acquire the critical section, we must
2611 * detect and handle migration if it has occurred.
2612 */
2613zfree_restart:
2614 critical_enter();
2615 cpu = curcpu;
2616 cache = &zone->uz_cpu[cpu];
2617
2618zfree_start:
2619 bucket = cache->uc_freebucket;
2620
2621 if (bucket) {
2622 /*
2623 * Do we have room in our bucket? It is OK for this uz count
2624 * check to be slightly out of sync.
2625 */
2626
2627 if (bucket->ub_cnt < bucket->ub_entries) {
2628 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2629 ("uma_zfree: Freeing to non free bucket index."));
2630 bucket->ub_bucket[bucket->ub_cnt] = item;
2631 bucket->ub_cnt++;
2632 cache->uc_frees++;
2633 critical_exit();
2634 return;
2635 } else if (cache->uc_allocbucket) {
2636#ifdef UMA_DEBUG_ALLOC
2637 printf("uma_zfree: Swapping buckets.\n");
2638#endif
2639 /*
2640 * We have run out of space in our freebucket.
2641 * See if we can switch with our alloc bucket.
2642 */
2643 if (cache->uc_allocbucket->ub_cnt <
2644 cache->uc_freebucket->ub_cnt) {
2645 bucket = cache->uc_freebucket;
2646 cache->uc_freebucket = cache->uc_allocbucket;
2647 cache->uc_allocbucket = bucket;
2648 goto zfree_start;
2649 }
2650 }
2651 }
2652 /*
2653 * We can get here for two reasons:
2654 *
2655 * 1) The buckets are NULL
2656 * 2) The alloc and free buckets are both somewhat full.
2657 *
2658 * We must go back the zone, which requires acquiring the zone lock,
2659 * which in turn means we must release and re-acquire the critical
2660 * section. Since the critical section is released, we may be
2661 * preempted or migrate. As such, make sure not to maintain any
2662 * thread-local state specific to the cache from prior to releasing
2663 * the critical section.
2664 */
2665 critical_exit();
2666 ZONE_LOCK(zone);
2667 critical_enter();
2668 cpu = curcpu;
2669 cache = &zone->uz_cpu[cpu];
2670 if (cache->uc_freebucket != NULL) {
2671 if (cache->uc_freebucket->ub_cnt <
2672 cache->uc_freebucket->ub_entries) {
2673 ZONE_UNLOCK(zone);
2674 goto zfree_start;
2675 }
2676 if (cache->uc_allocbucket != NULL &&
2677 (cache->uc_allocbucket->ub_cnt <
2678 cache->uc_freebucket->ub_cnt)) {
2679 ZONE_UNLOCK(zone);
2680 goto zfree_start;
2681 }
2682 }
2683
2684 /* Since we have locked the zone we may as well send back our stats */
2685 zone->uz_allocs += cache->uc_allocs;
2686 cache->uc_allocs = 0;
2687 zone->uz_frees += cache->uc_frees;
2688 cache->uc_frees = 0;
2689
2690 bucket = cache->uc_freebucket;
2691 cache->uc_freebucket = NULL;
2692
2693 /* Can we throw this on the zone full list? */
2694 if (bucket != NULL) {
2695#ifdef UMA_DEBUG_ALLOC
2696 printf("uma_zfree: Putting old bucket on the free list.\n");
2697#endif
2698		/* ub_cnt is the number of cached items; the bucket must not be empty. */
2699 KASSERT(bucket->ub_cnt != 0,
2700 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2701 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2702 bucket, ub_link);
2703 }
2704 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2705 LIST_REMOVE(bucket, ub_link);
2706 ZONE_UNLOCK(zone);
2707 cache->uc_freebucket = bucket;
2708 goto zfree_start;
2709 }
2710 /* We are no longer associated with this CPU. */
2711 critical_exit();
2712
2713 /* And the zone.. */
2714 ZONE_UNLOCK(zone);
2715
2716#ifdef UMA_DEBUG_ALLOC
2717 printf("uma_zfree: Allocating new free bucket.\n");
2718#endif
2719 bflags = M_NOWAIT;
2720
2721 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2722 bflags |= M_NOVM;
2723 bucket = bucket_alloc(zone->uz_count, bflags);
2724 if (bucket) {
2725 ZONE_LOCK(zone);
2726 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2727 bucket, ub_link);
2728 ZONE_UNLOCK(zone);
2729 goto zfree_restart;
2730 }
2731
2732 /*
2733 * If nothing else caught this, we'll just do an internal free.
2734 */
2735zfree_internal:
2736 zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2737
2738 return;
2739}
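
/*
 * An illustrative sketch (not part of this file) of the per-CPU cache
 * discipline described in the comments above, shown on the statistics
 * hand-off that uma_zfree_arg() performs a few lines earlier: the zone
 * mutex may block, so it cannot be acquired inside a critical section;
 * take it first, then enter the critical section to pin the current CPU's
 * cache.  When a critical section is already held (the fast path), it has
 * to be dropped before locking the zone, and curcpu must be re-read
 * afterwards because the thread may have migrated in between.
 */
static void
example_flush_cpu_stats(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	ZONE_LOCK(zone);		/* Zone counters need the zone mutex. */
	critical_enter();		/* No preemption or migration now. */
	cpu = curcpu;			/* Only valid until critical_exit(). */
	cache = &zone->uz_cpu[cpu];
	zone->uz_allocs += cache->uc_allocs;
	cache->uc_allocs = 0;
	zone->uz_frees += cache->uc_frees;
	cache->uc_frees = 0;
	critical_exit();
	ZONE_UNLOCK(zone);
}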
2740
2741/*
2742 * Frees an item directly back to the zone's keg, bypassing the per-CPU caches
2743 *
2744 * Arguments:
2745 * zone The zone to free to
2746 * item The item we're freeing
2747 * udata User supplied data for the dtor
2748 * skip Skip dtors and finis
2749 */
2750static void
2751zone_free_item(uma_zone_t zone, void *item, void *udata,
2752 enum zfreeskip skip, int flags)
2753{
2754 uma_slab_t slab;
2755 uma_slabrefcnt_t slabref;
2756 uma_keg_t keg;
2757 u_int8_t *mem;
2758 u_int8_t freei;
2759 int clearfull;
2760
2761 if (skip < SKIP_DTOR && zone->uz_dtor)
2762 zone->uz_dtor(item, zone->uz_size, udata);
2763
2764 if (skip < SKIP_FINI && zone->uz_fini)
2765 zone->uz_fini(item, zone->uz_size);
2766
2767 ZONE_LOCK(zone);
2768
2769 if (flags & ZFREE_STATFAIL)
2770 zone->uz_fails++;
2771 if (flags & ZFREE_STATFREE)
2772 zone->uz_frees++;
2773
2774 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2775 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2776 keg = zone_first_keg(zone); /* Must only be one. */
2777 if (zone->uz_flags & UMA_ZONE_HASH) {
2778 slab = hash_sfind(&keg->uk_hash, mem);
2779 } else {
2780 mem += keg->uk_pgoff;
2781 slab = (uma_slab_t)mem;
2782 }
2783 } else {
2784 /* This prevents redundant lookups via free(). */
2785 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
2786 slab = (uma_slab_t)udata;
2787 else
2788 slab = vtoslab((vm_offset_t)item);
2789 keg = slab->us_keg;
2790 keg_relock(keg, zone);
2791 }
2792 MPASS(keg == slab->us_keg);
2793
2794 /* Do we need to remove from any lists? */
2795 if (slab->us_freecount+1 == keg->uk_ipers) {
2796 LIST_REMOVE(slab, us_link);
2797 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2798 } else if (slab->us_freecount == 0) {
2799 LIST_REMOVE(slab, us_link);
2800 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2801 }
2802
2803	/* Slab bookkeeping: locate the item's slot and return it to the free list. */
2804 freei = ((unsigned long)item - (unsigned long)slab->us_data)
2805 / keg->uk_rsize;
2806
2807#ifdef INVARIANTS
2808 if (!skip)
2809 uma_dbg_free(zone, slab, item);
2810#endif
2811
2812 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2813 slabref = (uma_slabrefcnt_t)slab;
2814 slabref->us_freelist[freei].us_item = slab->us_firstfree;
2815 } else {
2816 slab->us_freelist[freei].us_item = slab->us_firstfree;
2817 }
2818 slab->us_firstfree = freei;
2819 slab->us_freecount++;
2820
2821	/* Keg statistics */
2822 keg->uk_free++;
2823
2824 clearfull = 0;
2825 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2826 if (keg->uk_pages < keg->uk_maxpages) {
2827 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2828 clearfull = 1;
2829 }
2830
2831 /*
2832 * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
2833 * wake up all procs blocked on pages. This should be uncommon, so
2834 * keeping this simple for now (rather than adding count of blocked
2835 * threads etc).
2836 */
2837 wakeup(keg);
2838 }
2839 if (clearfull) {
2840 zone_relock(zone, keg);
2841 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2842 wakeup(zone);
2843 ZONE_UNLOCK(zone);
2844 } else
2845 KEG_UNLOCK(keg);
2846}
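
/*
 * A worked example of the slot arithmetic above, with assumed values: if
 * keg->uk_rsize is 256 and slab->us_data is 0xc1000000, then freeing the
 * item at 0xc1000400 gives
 *
 *	freei = (0xc1000400 - 0xc1000000) / 256 = 4,
 *
 * so slot 4 is pushed onto the slab's free list (it becomes the new
 * us_firstfree) and us_freecount is incremented.
 */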
2847
2848/* See uma.h */
2849int
2850uma_zone_set_max(uma_zone_t zone, int nitems)
2851{
2852 uma_keg_t keg;
2853
2854 ZONE_LOCK(zone);
2855 keg = zone_first_keg(zone);
2856 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2857 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2858 keg->uk_maxpages += keg->uk_ppera;
2859 nitems = keg->uk_maxpages * keg->uk_ipers;
2860 ZONE_UNLOCK(zone);
2861
2862 return (nitems);
2863}
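
/*
 * A worked example of the rounding above, with assumed keg parameters
 * uk_ipers = 100 (items per slab) and uk_ppera = 1 (page per slab):
 * uma_zone_set_max(zone, 1050) computes uk_maxpages = (1050 / 100) * 1 = 10,
 * sees that 10 * 100 = 1000 < 1050, bumps uk_maxpages to 11, and returns
 * 11 * 100 = 1100.  The limit is enforced in whole slabs, so callers that
 * need the exact cap should use the return value rather than the nitems
 * they requested.
 */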
2864
2865/* See uma.h */
2866int
2867uma_zone_get_max(uma_zone_t zone)
2868{
2869 int nitems;
2870 uma_keg_t keg;
2871
2872 ZONE_LOCK(zone);
2873 keg = zone_first_keg(zone);
2874 nitems = keg->uk_maxpages * keg->uk_ipers;
2875 ZONE_UNLOCK(zone);
2876
2877 return (nitems);
2878}
2879
2880/* See uma.h */
2881int
2882uma_zone_get_cur(uma_zone_t zone)
2883{
2884 int64_t nitems;
2885 u_int i;
2886
2887 ZONE_LOCK(zone);
2888 nitems = zone->uz_allocs - zone->uz_frees;
2889 CPU_FOREACH(i) {
2890 /*
2891 * See the comment in sysctl_vm_zone_stats() regarding the
2892 * safety of accessing the per-cpu caches. With the zone lock
2893 * held, it is safe, but can potentially result in stale data.
2894 */
2895 nitems += zone->uz_cpu[i].uc_allocs -
2896 zone->uz_cpu[i].uc_frees;
2897 }
2898 ZONE_UNLOCK(zone);
2899
2900 return (nitems < 0 ? 0 : nitems);
2901}
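
/*
 * A hypothetical consumer of the two accessors above: a soft "nearly full"
 * check.  uma_zone_get_cur() sums the per-CPU counters without cross-CPU
 * synchronization, so the result is approximate and only suitable for
 * heuristics of this kind.
 */
static int
example_zone_nearly_full(uma_zone_t zone)
{

	return (uma_zone_get_cur(zone) >= 9 * (uma_zone_get_max(zone) / 10));
}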
2902
2903/* See uma.h */
2904void
2905uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2906{
2907 uma_keg_t keg;
2908
2909 ZONE_LOCK(zone);
2910 keg = zone_first_keg(zone);
2911 KASSERT(keg->uk_pages == 0,
2912 ("uma_zone_set_init on non-empty keg"));
2913 keg->uk_init = uminit;
2914 ZONE_UNLOCK(zone);
2915}
2916
2917/* See uma.h */
2918void
2919uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2920{
2921 uma_keg_t keg;
2922
2923 ZONE_LOCK(zone);
2924 keg = zone_first_keg(zone);
2925 KASSERT(keg->uk_pages == 0,
2926 ("uma_zone_set_fini on non-empty keg"));
2927 keg->uk_fini = fini;
2928 ZONE_UNLOCK(zone);
2929}
2930
2931/* See uma.h */
2932void
2933uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2934{
2935 ZONE_LOCK(zone);
2936 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2937 ("uma_zone_set_zinit on non-empty keg"));
2938 zone->uz_init = zinit;
2939 ZONE_UNLOCK(zone);
2940}
2941
2942/* See uma.h */
2943void
2944uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2945{
2946 ZONE_LOCK(zone);
2947 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2948 ("uma_zone_set_zfini on non-empty keg"));
2949 zone->uz_fini = zfini;
2950 ZONE_UNLOCK(zone);
2951}
2952
2953/* See uma.h */
2954/* XXX uk_freef is not actually used with the zone locked */
2955void
2956uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2957{
2958
2959 ZONE_LOCK(zone);
2960 zone_first_keg(zone)->uk_freef = freef;
2961 ZONE_UNLOCK(zone);
2962}
2963
2964/* See uma.h */
2965/* XXX uk_allocf is not actually used with the zone locked */
2966void
2967uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2968{
2969 uma_keg_t keg;
2970
2971 ZONE_LOCK(zone);
2972 keg = zone_first_keg(zone);
2973 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2974 keg->uk_allocf = allocf;
2975 ZONE_UNLOCK(zone);
2976}
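
/*
 * A hypothetical example of wiring the setters above into a zone at
 * creation time.  'struct foo' and the foo_* names are illustrative, and
 * the uma_zcreate() prototype is assumed from uma.h.  Keg-level init/fini
 * run when backing slabs are allocated and released, so they suit
 * expensive, reusable setup such as lock initialization; the KASSERTs
 * above require that they be installed before the first allocation.
 */
struct foo {
	struct mtx	 f_lock;
	int		 f_refs;
};

static uma_zone_t foo_zone;

static int
foo_init(void *mem, int size, int flags)
{
	struct foo *f = mem;

	mtx_init(&f->f_lock, "foo", NULL, MTX_DEF);
	return (0);
}

static void
foo_fini(void *mem, int size)
{
	struct foo *f = mem;

	mtx_destroy(&f->f_lock);
}

static void
foo_zone_setup(void)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_init(foo_zone, foo_init);
	uma_zone_set_fini(foo_zone, foo_fini);
	uma_zone_set_max(foo_zone, 1024);
}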
2977
2978/* See uma.h */
2979int
2980uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2981{
2982 uma_keg_t keg;
2983 vm_offset_t kva;
2984 int pages;
2985
2986 keg = zone_first_keg(zone);
2987 pages = count / keg->uk_ipers;
2988
2989 if (pages * keg->uk_ipers < count)
2990 pages++;
2991
2992 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2993
2994 if (kva == 0)
2995 return (0);
2996 if (obj == NULL)
2997 obj = vm_object_allocate(OBJT_PHYS, pages);
2998 else {
2999 VM_OBJECT_LOCK_INIT(obj, "uma object");
3000 _vm_object_allocate(OBJT_PHYS, pages, obj);
3001 }
3002 ZONE_LOCK(zone);
3003 keg->uk_kva = kva;
3004 keg->uk_obj = obj;
3005 keg->uk_maxpages = pages;
3006 keg->uk_allocf = obj_alloc;
3007 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
3008 ZONE_UNLOCK(zone);
3009 return (1);
3010}
3011
3012/* See uma.h */
3013void
3014uma_prealloc(uma_zone_t zone, int items)
3015{
3016 int slabs;
3017 uma_slab_t slab;
3018 uma_keg_t keg;
3019
3020 keg = zone_first_keg(zone);
3021 ZONE_LOCK(zone);
3022 slabs = items / keg->uk_ipers;
3023 if (slabs * keg->uk_ipers < items)
3024 slabs++;
3025 while (slabs > 0) {
3026 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3027 if (slab == NULL)
3028 break;
3029 MPASS(slab->us_keg == keg);
3030 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3031 slabs--;
3032 }
3033 ZONE_UNLOCK(zone);
3034}
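
/*
 * Continuing the hypothetical foo_zone example above: pre-populate the zone
 * at setup time so that early consumers find whole slabs already on the
 * keg's free list.  The item count is illustrative; note that the slabs are
 * allocated with M_WAITOK, so this may sleep.
 */
static void
foo_zone_prime(void)
{

	uma_prealloc(foo_zone, 256);
}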
3035
3036/* See uma.h */
3037u_int32_t *
3038uma_find_refcnt(uma_zone_t zone, void *item)
3039{
3040 uma_slabrefcnt_t slabref;
3041 uma_keg_t keg;
3042 u_int32_t *refcnt;
3043 int idx;
3044
3045 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
3046 (~UMA_SLAB_MASK));
3047 keg = slabref->us_keg;
3048 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
3049 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3050 idx = ((unsigned long)item - (unsigned long)slabref->us_data)
3051 / keg->uk_rsize;
3052 refcnt = &slabref->us_freelist[idx].us_refcnt;
3053 return refcnt;
3054}
3055
3056/* See uma.h */
3057void
3058uma_reclaim(void)
3059{
3060#ifdef UMA_DEBUG
3061 printf("UMA: vm asked us to release pages!\n");
3062#endif
3063 bucket_enable();
3064 zone_foreach(zone_drain);
3065 /*
3066	 * Some slabs may have been freed, but the slab zone is visited early in
3067	 * the walk above; drain it again here so that pages freed up by draining
3068	 * the other zones can themselves be released.  Do the same for buckets.
3069 */
3070 zone_drain(slabzone);
3071 zone_drain(slabrefzone);
3072 bucket_zone_drain();
3073}
3074
3075/* See uma.h */
3076int
3077uma_zone_exhausted(uma_zone_t zone)
3078{
3079 int full;
3080
3081 ZONE_LOCK(zone);
3082 full = (zone->uz_flags & UMA_ZFLAG_FULL);
3083 ZONE_UNLOCK(zone);
3084 return (full);
3085}
3086
3087int
3088uma_zone_exhausted_nolock(uma_zone_t zone)
3089{
3090 return (zone->uz_flags & UMA_ZFLAG_FULL);
3091}
3092
3093void *
3094uma_large_malloc(int size, int wait)
3095{
3096 void *mem;
3097 uma_slab_t slab;
3098 u_int8_t flags;
3099
3100 slab = zone_alloc_item(slabzone, NULL, wait);
3101 if (slab == NULL)
3102 return (NULL);
3103 mem = page_alloc(NULL, size, &flags, wait);
3104 if (mem) {
3105 vsetslab((vm_offset_t)mem, slab);
3106 slab->us_data = mem;
3107 slab->us_flags = flags | UMA_SLAB_MALLOC;
3108 slab->us_size = size;
3109 } else {
3110 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
3111 ZFREE_STATFAIL | ZFREE_STATFREE);
3112 }
3113
3114 return (mem);
3115}
3116
3117void
3118uma_large_free(uma_slab_t slab)
3119{
3120 vsetobj((vm_offset_t)slab->us_data, kmem_object);
3121 page_free(slab->us_data, slab->us_size, slab->us_flags);
3122 zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
3123}
3124
3125void
3126uma_print_stats(void)
3127{
3128 zone_foreach(uma_print_zone);
3129}
3130
3131static void
3132slab_print(uma_slab_t slab)
3133{
3134 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
3135 slab->us_keg, slab->us_data, slab->us_freecount,
3136 slab->us_firstfree);
3137}
3138
3139static void
3140cache_print(uma_cache_t cache)
3141{
3142 printf("alloc: %p(%d), free: %p(%d)\n",
3143 cache->uc_allocbucket,
3144 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3145 cache->uc_freebucket,
3146 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3147}
3148
3149static void
3150uma_print_keg(uma_keg_t keg)
3151{
3152 uma_slab_t slab;
3153
3154 printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
3155 "out %d free %d limit %d\n",
3156 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3157 keg->uk_ipers, keg->uk_ppera,
3158 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3159 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3160 printf("Part slabs:\n");
3161 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3162 slab_print(slab);
3163 printf("Free slabs:\n");
3164 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3165 slab_print(slab);
3166 printf("Full slabs:\n");
3167 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3168 slab_print(slab);
3169}
3170
3171void
3172uma_print_zone(uma_zone_t zone)
3173{
3174 uma_cache_t cache;
3175 uma_klink_t kl;
3176 int i;
3177
3178 printf("zone: %s(%p) size %d flags %d\n",
3179 zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3180 LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3181 uma_print_keg(kl->kl_keg);
3182 CPU_FOREACH(i) {
3183 cache = &zone->uz_cpu[i];
3184 printf("CPU %d Cache:\n", i);
3185 cache_print(cache);
3186 }
3187}
3188
3189#ifdef DDB
3190/*
3191 * Generate statistics across both the zone and its per-cpu caches.  Return
3192 * desired statistics if the pointer is non-NULL for that statistic.
3193 *
3194 * Note: does not update the zone statistics, as it can't safely clear the
3195 * per-CPU cache statistic.
3196 *
3197 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3198 * safe from off-CPU; we should modify the caches to track this information
3199 * directly so that we don't have to.
3200 */
3201static void
3202uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
3203 u_int64_t *freesp, u_int64_t *sleepsp)
3204{
3205 uma_cache_t cache;
3206 u_int64_t allocs, frees, sleeps;
3207 int cachefree, cpu;
3208
3209 allocs = frees = sleeps = 0;
3210 cachefree = 0;
3211 CPU_FOREACH(cpu) {
3212 cache = &z->uz_cpu[cpu];
3213 if (cache->uc_allocbucket != NULL)
3214 cachefree += cache->uc_allocbucket->ub_cnt;
3215 if (cache->uc_freebucket != NULL)
3216 cachefree += cache->uc_freebucket->ub_cnt;
3217 allocs += cache->uc_allocs;
3218 frees += cache->uc_frees;
3219 }
3220 allocs += z->uz_allocs;
3221 frees += z->uz_frees;
3222 sleeps += z->uz_sleeps;
3223 if (cachefreep != NULL)
3224 *cachefreep = cachefree;
3225 if (allocsp != NULL)
3226 *allocsp = allocs;
3227 if (freesp != NULL)
3228 *freesp = frees;
3229 if (sleepsp != NULL)
3230 *sleepsp = sleeps;
3231}
3232#endif /* DDB */
3233
3234static int
3235sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3236{
3237 uma_keg_t kz;
3238 uma_zone_t z;
3239 int count;
3240
3241 count = 0;
3242 mtx_lock(&uma_mtx);
3243 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3244 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3245 count++;
3246 }
3247 mtx_unlock(&uma_mtx);
3248 return (sysctl_handle_int(oidp, &count, 0, req));
3249}
3250
3251static int
3252sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3253{
3254 struct uma_stream_header ush;
3255 struct uma_type_header uth;
3256 struct uma_percpu_stat ups;
3257 uma_bucket_t bucket;
3258 struct sbuf sbuf;
3259 uma_cache_t cache;
3260 uma_klink_t kl;
3261 uma_keg_t kz;
3262 uma_zone_t z;
3263 uma_keg_t k;
3264 int count, error, i;
3265
3266 error = sysctl_wire_old_buffer(req, 0);
3267 if (error != 0)
3268 return (error);
3269 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3270
3271 count = 0;
3272 mtx_lock(&uma_mtx);
3273 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3274 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3275 count++;
3276 }
3277
3278 /*
3279 * Insert stream header.
3280 */
3281 bzero(&ush, sizeof(ush));
3282 ush.ush_version = UMA_STREAM_VERSION;
3283 ush.ush_maxcpus = (mp_maxid + 1);
3284 ush.ush_count = count;
3285 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3286
3287 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3288 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3289 bzero(&uth, sizeof(uth));
3290 ZONE_LOCK(z);
3291 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3292 uth.uth_align = kz->uk_align;
3293 uth.uth_size = kz->uk_size;
3294 uth.uth_rsize = kz->uk_rsize;
3295 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3296 k = kl->kl_keg;
3297 uth.uth_maxpages += k->uk_maxpages;
3298 uth.uth_pages += k->uk_pages;
3299 uth.uth_keg_free += k->uk_free;
3300 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3301 * k->uk_ipers;
3302 }
3303
3304 /*
3305			 * A zone is secondary if it is not the first entry
3306 * on the keg's zone list.
3307 */
3308 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3309 (LIST_FIRST(&kz->uk_zones) != z))
3310 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3311
3312 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3313 uth.uth_zone_free += bucket->ub_cnt;
3314 uth.uth_allocs = z->uz_allocs;
3315 uth.uth_frees = z->uz_frees;
3316 uth.uth_fails = z->uz_fails;
3317 uth.uth_sleeps = z->uz_sleeps;
3318 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3319 /*
3320 * While it is not normally safe to access the cache
3321 * bucket pointers while not on the CPU that owns the
3322 * cache, we only allow the pointers to be exchanged
3323 * without the zone lock held, not invalidated, so
3324 * accept the possible race associated with bucket
3325 * exchange during monitoring.
3326 */
3327 for (i = 0; i < (mp_maxid + 1); i++) {
3328 bzero(&ups, sizeof(ups));
3329 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3330 goto skip;
3331 if (CPU_ABSENT(i))
3332 goto skip;
3333 cache = &z->uz_cpu[i];
3334 if (cache->uc_allocbucket != NULL)
3335 ups.ups_cache_free +=
3336 cache->uc_allocbucket->ub_cnt;
3337 if (cache->uc_freebucket != NULL)
3338 ups.ups_cache_free +=
3339 cache->uc_freebucket->ub_cnt;
3340 ups.ups_allocs = cache->uc_allocs;
3341 ups.ups_frees = cache->uc_frees;
3342skip:
3343 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3344 }
3345 ZONE_UNLOCK(z);
3346 }
3347 }
3348 mtx_unlock(&uma_mtx);
3349 error = sbuf_finish(&sbuf);
3350 sbuf_delete(&sbuf);
3351 return (error);
3352}
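
/*
 * A hedged userland sketch (in the spirit of vmstat -z / libmemstat) of how
 * the stream emitted above might be consumed.  The sysctl is assumed to be
 * exported as "vm.zone_stats", and the stream structure definitions are
 * assumed to be available from <vm/uma.h>; error handling is minimal and
 * the per-CPU records are skipped rather than decoded.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <vm/uma.h>

int
main(void)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	char *buf, *p;
	size_t len;
	u_int i;

	/* Standard two-step sysctl fetch: size it, then read it. */
	if (sysctlbyname("vm.zone_stats", NULL, &len, NULL, 0) != 0)
		err(1, "vm.zone_stats size");
	len += len / 8;			/* Slack in case zones appear. */
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("vm.zone_stats", buf, &len, NULL, 0) != 0)
		err(1, "vm.zone_stats read");

	p = buf;
	memcpy(&ush, p, sizeof(ush));
	p += sizeof(ush);
	if (ush.ush_version != UMA_STREAM_VERSION)
		errx(1, "unexpected stream version");

	/* One type header per zone, then ush_maxcpus per-CPU records. */
	for (i = 0; i < ush.ush_count; i++) {
		memcpy(&uth, p, sizeof(uth));
		p += sizeof(uth);
		printf("%-24s allocs %ju frees %ju\n", uth.uth_name,
		    (uintmax_t)uth.uth_allocs, (uintmax_t)uth.uth_frees);
		p += ush.ush_maxcpus * sizeof(struct uma_percpu_stat);
	}
	free(buf);
	return (0);
}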
3353
3354#ifdef DDB
3355DB_SHOW_COMMAND(uma, db_show_uma)
3356{
3357 u_int64_t allocs, frees, sleeps;
3358 uma_bucket_t bucket;
3359 uma_keg_t kz;
3360 uma_zone_t z;
3361 int cachefree;
3362
3363 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3364 "Requests", "Sleeps");
3365 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3366 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3367 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3368 allocs = z->uz_allocs;
3369 frees = z->uz_frees;
3370 sleeps = z->uz_sleeps;
3371 cachefree = 0;
3372 } else
3373 uma_zone_sumstat(z, &cachefree, &allocs,
3374 &frees, &sleeps);
3375 if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3376 (LIST_FIRST(&kz->uk_zones) != z)))
3377 cachefree += kz->uk_free;
3378 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3379 cachefree += bucket->ub_cnt;
3380 db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3381 (uintmax_t)kz->uk_size,
3382 (intmax_t)(allocs - frees), cachefree,
3383 (uintmax_t)allocs, sleeps);
3384 }
3385 }
3386}
3387#endif