uma_core.c: revision 137309 (deleted) -> revision 139318 (added)
1/*
2 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
2 * Copyright (c) 2004, 2005,
3 * Bosko Milekic <bmilekic@freebsd.org>
4 * Copyright (c) 2002, 2003, 2004, 2005,
5 * Jeffrey Roberson <jeff@freebsd.org>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * uma_core.c Implementation of the Universal Memory allocator
31 *
32 * This allocator is intended to replace the multitude of similar object caches
33 * in the standard FreeBSD kernel. The intent is to be flexible as well as
 34 * efficient. A primary design goal is to return unused memory to the rest of
35 * the system. This will make the system as a whole more flexible due to the
36 * ability to move memory to subsystems which most need it instead of leaving
37 * pools of reserved memory unused.
38 *
39 * The basic ideas stem from similar slab/zone based allocators whose algorithms
40 * are well known.
41 *
42 */
43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
49__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 137309 2004-11-06 11:43:30Z rwatson $");
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 139318 2004-12-26 00:35:12Z bmilekic $");
50
51/* I should really use ktr.. */
52/*
53#define UMA_DEBUG 1
54#define UMA_DEBUG_ALLOC 1
55#define UMA_DEBUG_ALLOC_1 1
56*/
57
58#include "opt_param.h"
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/kernel.h>
62#include <sys/types.h>
63#include <sys/queue.h>
64#include <sys/malloc.h>
65#include <sys/ktr.h>
66#include <sys/lock.h>
67#include <sys/sysctl.h>
68#include <sys/mutex.h>
69#include <sys/proc.h>
70#include <sys/smp.h>
71#include <sys/vmmeter.h>
72
73#include <vm/vm.h>
74#include <vm/vm_object.h>
75#include <vm/vm_page.h>
76#include <vm/vm_param.h>
77#include <vm/vm_map.h>
78#include <vm/vm_kern.h>
79#include <vm/vm_extern.h>
80#include <vm/uma.h>
81#include <vm/uma_int.h>
82#include <vm/uma_dbg.h>
83
84#include <machine/vmparam.h>
85
86/*
87 * This is the zone and keg from which all zones are spawned. The idea is that
88 * even the zone & keg heads are allocated from the allocator, so we use the
89 * bss section to bootstrap us.
90 */
91static struct uma_keg masterkeg;
92static struct uma_zone masterzone_k;
93static struct uma_zone masterzone_z;
94static uma_zone_t kegs = &masterzone_k;
95static uma_zone_t zones = &masterzone_z;
96
97/* This is the zone from which all of uma_slab_t's are allocated. */
98static uma_zone_t slabzone;
99static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
100
101/*
102 * The initial hash tables come out of this zone so they can be allocated
103 * prior to malloc coming up.
104 */
105static uma_zone_t hashzone;
106
107static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
108
109/*
110 * Are we allowed to allocate buckets?
111 */
112static int bucketdisable = 1;
113
114/* Linked list of all kegs in the system */
115static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
116
117/* This mutex protects the keg list */
118static struct mtx uma_mtx;
119
120/* These are the pcpu cache locks */
121static struct mtx uma_pcpu_mtx[MAXCPU];
122
123/* Linked list of boot time pages */
124static LIST_HEAD(,uma_slab) uma_boot_pages =
125 LIST_HEAD_INITIALIZER(&uma_boot_pages);
126
127/* Count of free boottime pages */
128static int uma_boot_free = 0;
129
130/* Is the VM done starting up? */
131static int booted = 0;
132
133/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
134static u_int uma_max_ipers;
135static u_int uma_max_ipers_ref;
136
137/*
138 * This is the handle used to schedule events that need to happen
139 * outside of the allocation fast path.
140 */
141static struct callout uma_callout;
142#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
143
144/*
145 * This structure is passed as the zone ctor arg so that I don't have to create
146 * a special allocation function just for zones.
147 */
148struct uma_zctor_args {
149 char *name;
150 size_t size;
151 uma_ctor ctor;
152 uma_dtor dtor;
153 uma_init uminit;
154 uma_fini fini;
155 uma_keg_t keg;
156 int align;
157 u_int16_t flags;
158};
159
160struct uma_kctor_args {
161 uma_zone_t zone;
162 size_t size;
163 uma_init uminit;
164 uma_fini fini;
165 int align;
166 u_int16_t flags;
167};
168
169struct uma_bucket_zone {
170 uma_zone_t ubz_zone;
171 char *ubz_name;
172 int ubz_entries;
173};
174
175#define BUCKET_MAX 128
176
177struct uma_bucket_zone bucket_zones[] = {
178 { NULL, "16 Bucket", 16 },
179 { NULL, "32 Bucket", 32 },
180 { NULL, "64 Bucket", 64 },
181 { NULL, "128 Bucket", 128 },
182 { NULL, NULL, 0}
183};
184
185#define BUCKET_SHIFT 4
186#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
187
188/*
189 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
190 * of approximately the right size.
191 */
192static uint8_t bucket_size[BUCKET_ZONES];
193
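/*
 * Worked example (editorial sketch, not part of the source): with
 * BUCKET_SHIFT == 4 and the bucket_zones[] table above, bucket_init()
 * below fills bucket_size[] as
 *
 *	index:        0  1  2  3  4  5  6  7  8
 *	bucket_size:  0  0  1  2  2  3  3  3  3
 *
 * so bucket_zone_lookup(20), say, computes howmany(20, 16) == 2 and
 * returns bucket_zones[1], the "32 Bucket" zone -- the smallest bucket
 * that can hold 20 item pointers.
 */
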
194enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
195
196/* Prototypes.. */
197
198static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
199static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
200static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
201static void page_free(void *, int, u_int8_t);
202static uma_slab_t slab_zalloc(uma_zone_t, int);
203static void cache_drain(uma_zone_t);
204static void bucket_drain(uma_zone_t, uma_bucket_t);
205static void bucket_cache_drain(uma_zone_t zone);
206static int keg_ctor(void *, int, void *, int);
207static void keg_dtor(void *, int, void *);
208static int zone_ctor(void *, int, void *, int);
209static void zone_dtor(void *, int, void *);
210static int zero_init(void *, int, int);
211static void zone_small_init(uma_zone_t zone);
212static void zone_large_init(uma_zone_t zone);
213static void zone_foreach(void (*zfunc)(uma_zone_t));
214static void zone_timeout(uma_zone_t zone);
215static int hash_alloc(struct uma_hash *);
216static int hash_expand(struct uma_hash *, struct uma_hash *);
217static void hash_free(struct uma_hash *hash);
218static void uma_timeout(void *);
219static void uma_startup3(void);
220static void *uma_zalloc_internal(uma_zone_t, void *, int);
221static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip);
222static void bucket_enable(void);
223static void bucket_init(void);
224static uma_bucket_t bucket_alloc(int, int);
225static void bucket_free(uma_bucket_t);
226static void bucket_zone_drain(void);
227static int uma_zalloc_bucket(uma_zone_t zone, int flags);
228static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
229static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
230static void zone_drain(uma_zone_t);
231static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
232 uma_fini fini, int align, u_int16_t flags);
233
234void uma_print_zone(uma_zone_t);
235void uma_print_stats(void);
236static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
237
238#ifdef WITNESS
239static int nosleepwithlocks = 1;
240SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
241 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
242#else
243static int nosleepwithlocks = 0;
244SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
245 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
246#endif
247SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
248 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
249SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
250
251/*
252 * This routine checks to see whether or not it's safe to enable buckets.
253 */
254
255static void
256bucket_enable(void)
257{
258 if (cnt.v_free_count < cnt.v_free_min)
259 bucketdisable = 1;
260 else
261 bucketdisable = 0;
262}
263
264/*
265 * Initialize bucket_zones, the array of zones of buckets of various sizes.
266 *
267 * For each zone, calculate the memory required for each bucket, consisting
268 * of the header and an array of pointers. Initialize bucket_size[] to point
269 * the range of appropriate bucket sizes at the zone.
270 */
271static void
272bucket_init(void)
273{
274 struct uma_bucket_zone *ubz;
275 int i;
276 int j;
277
278 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
279 int size;
280
281 ubz = &bucket_zones[j];
282 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
283 size += sizeof(void *) * ubz->ubz_entries;
284 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
285 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
286 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
287 bucket_size[i >> BUCKET_SHIFT] = j;
288 }
289}
290
291/*
292 * Given a desired number of entries for a bucket, return the zone from which
293 * to allocate the bucket.
294 */
295static struct uma_bucket_zone *
296bucket_zone_lookup(int entries)
297{
298 int idx;
299
300 idx = howmany(entries, 1 << BUCKET_SHIFT);
301 return (&bucket_zones[bucket_size[idx]]);
302}
303
304static uma_bucket_t
305bucket_alloc(int entries, int bflags)
306{
307 struct uma_bucket_zone *ubz;
308 uma_bucket_t bucket;
309
310 /*
311 * This is to stop us from allocating per cpu buckets while we're
312 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the
313 * boot pages. This also prevents us from allocating buckets in
314 * low memory situations.
315 */
316 if (bucketdisable)
317 return (NULL);
318
319 ubz = bucket_zone_lookup(entries);
320 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
321 if (bucket) {
322#ifdef INVARIANTS
323 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
324#endif
325 bucket->ub_cnt = 0;
326 bucket->ub_entries = ubz->ubz_entries;
327 }
328
329 return (bucket);
330}
331
332static void
333bucket_free(uma_bucket_t bucket)
334{
335 struct uma_bucket_zone *ubz;
336
337 ubz = bucket_zone_lookup(bucket->ub_entries);
338 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE);
339}
340
341static void
342bucket_zone_drain(void)
343{
344 struct uma_bucket_zone *ubz;
345
346 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
347 zone_drain(ubz->ubz_zone);
348}
349
350
351/*
352 * Routine called by timeout which is used to fire off some time interval
353 * based calculations. (stats, hash size, etc.)
354 *
355 * Arguments:
356 * arg Unused
357 *
358 * Returns:
359 * Nothing
360 */
361static void
362uma_timeout(void *unused)
363{
364 bucket_enable();
365 zone_foreach(zone_timeout);
366
367 /* Reschedule this event */
368 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
369}
370
371/*
372 * Routine to perform timeout driven calculations. This expands the
373 * hashes and does per cpu statistics aggregation.
374 *
375 * Arguments:
376 * zone The zone to operate on
377 *
378 * Returns:
379 * Nothing
380 */
381static void
382zone_timeout(uma_zone_t zone)
383{
384 uma_keg_t keg;
385 uma_cache_t cache;
386 u_int64_t alloc;
387 int cpu;
388
389 keg = zone->uz_keg;
390 alloc = 0;
391
392 /*
393 * Aggregate per cpu cache statistics back to the zone.
394 *
395 * XXX This should be done in the sysctl handler.
396 *
397 * I may rewrite this to set a flag in the per cpu cache instead of
398 * locking. If the flag is not cleared on the next round I will have
399 * to lock and do it here instead so that the statistics don't get too
400 * far out of sync.
401 */
402 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
403 for (cpu = 0; cpu <= mp_maxid; cpu++) {
404 if (CPU_ABSENT(cpu))
405 continue;
406 CPU_LOCK(cpu);
407 cache = &zone->uz_cpu[cpu];
408 /* Add them up, and reset */
409 alloc += cache->uc_allocs;
410 cache->uc_allocs = 0;
411 CPU_UNLOCK(cpu);
412 }
413 }
414
415 /* Now push these stats back into the zone.. */
416 ZONE_LOCK(zone);
417 zone->uz_allocs += alloc;
418
419 /*
420 * Expand the zone hash table.
421 *
422 * This is done if the number of slabs is larger than the hash size.
 423 * What I'm trying to do here is completely avoid collisions. This
424 * may be a little aggressive. Should I allow for two collisions max?
425 */
426
427 if (keg->uk_flags & UMA_ZONE_HASH &&
428 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
429 struct uma_hash newhash;
430 struct uma_hash oldhash;
431 int ret;
432
433 /*
434 * This is so involved because allocating and freeing
435 * while the zone lock is held will lead to deadlock.
436 * I have to do everything in stages and check for
437 * races.
438 */
439 newhash = keg->uk_hash;
440 ZONE_UNLOCK(zone);
441 ret = hash_alloc(&newhash);
442 ZONE_LOCK(zone);
443 if (ret) {
444 if (hash_expand(&keg->uk_hash, &newhash)) {
445 oldhash = keg->uk_hash;
446 keg->uk_hash = newhash;
447 } else
448 oldhash = newhash;
449
450 ZONE_UNLOCK(zone);
451 hash_free(&oldhash);
452 ZONE_LOCK(zone);
453 }
454 }
455 ZONE_UNLOCK(zone);
456}
457
458/*
459 * Allocate and zero fill the next sized hash table from the appropriate
460 * backing store.
461 *
462 * Arguments:
463 * hash A new hash structure with the old hash size in uh_hashsize
464 *
465 * Returns:
 466 * 1 on success and 0 on failure.
467 */
468static int
469hash_alloc(struct uma_hash *hash)
470{
471 int oldsize;
472 int alloc;
473
474 oldsize = hash->uh_hashsize;
475
476 /* We're just going to go to a power of two greater */
477 if (oldsize) {
478 hash->uh_hashsize = oldsize * 2;
479 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
480 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
481 M_UMAHASH, M_NOWAIT);
482 } else {
483 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
484 hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
485 M_WAITOK);
486 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
487 }
488 if (hash->uh_slab_hash) {
489 bzero(hash->uh_slab_hash, alloc);
490 hash->uh_hashmask = hash->uh_hashsize - 1;
491 return (1);
492 }
493
494 return (0);
495}
496
497/*
498 * Expands the hash table for HASH zones. This is done from zone_timeout
499 * to reduce collisions. This must not be done in the regular allocation
500 * path, otherwise, we can recurse on the vm while allocating pages.
501 *
502 * Arguments:
503 * oldhash The hash you want to expand
504 * newhash The hash structure for the new table
505 *
506 * Returns:
507 * Nothing
510 */
511static int
512hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
513{
514 uma_slab_t slab;
515 int hval;
516 int i;
517
518 if (!newhash->uh_slab_hash)
519 return (0);
520
521 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
522 return (0);
523
524 /*
525 * I need to investigate hash algorithms for resizing without a
526 * full rehash.
527 */
528
529 for (i = 0; i < oldhash->uh_hashsize; i++)
530 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
531 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
532 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
533 hval = UMA_HASH(newhash, slab->us_data);
534 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
535 slab, us_hlink);
536 }
537
538 return (1);
539}
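
/*
 * Editorial illustration (assuming UMA_HASH() masks the slab's page
 * number with uh_hashmask, per its use above): when the table grows
 * from 32 to 64 buckets, a slab whose page number is 40 moves from
 * chain 40 & 31 == 8 to chain 40 & 63 == 40, so every slab must be
 * unlinked and re-inserted -- hence the full rehash in the loop above.
 */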
540
541/*
542 * Free the hash bucket to the appropriate backing store.
543 *
544 * Arguments:
545 * slab_hash The hash bucket we're freeing
546 * hashsize The number of entries in that hash bucket
547 *
548 * Returns:
549 * Nothing
550 */
551static void
552hash_free(struct uma_hash *hash)
553{
554 if (hash->uh_slab_hash == NULL)
555 return;
556 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
557 uma_zfree_internal(hashzone,
558 hash->uh_slab_hash, NULL, SKIP_NONE);
559 else
560 free(hash->uh_slab_hash, M_UMAHASH);
561}
562
563/*
564 * Frees all outstanding items in a bucket
565 *
566 * Arguments:
567 * zone The zone to free to, must be unlocked.
568 * bucket The free/alloc bucket with items, cpu queue must be locked.
569 *
570 * Returns:
571 * Nothing
572 */
573
574static void
575bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
576{
577 uma_slab_t slab;
578 int mzone;
579 void *item;
580
581 if (bucket == NULL)
582 return;
583
584 slab = NULL;
585 mzone = 0;
586
587 /* We have to lookup the slab again for malloc.. */
588 if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
589 mzone = 1;
590
591 while (bucket->ub_cnt > 0) {
592 bucket->ub_cnt--;
593 item = bucket->ub_bucket[bucket->ub_cnt];
594#ifdef INVARIANTS
595 bucket->ub_bucket[bucket->ub_cnt] = NULL;
596 KASSERT(item != NULL,
597 ("bucket_drain: botched ptr, item is NULL"));
598#endif
599 /*
600 * This is extremely inefficient. The slab pointer was passed
601 * to uma_zfree_arg, but we lost it because the buckets don't
602 * hold them. This will go away when free() gets a size passed
603 * to it.
604 */
605 if (mzone)
606 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
607 uma_zfree_internal(zone, item, slab, SKIP_DTOR);
608 }
609}
610
611/*
612 * Drains the per cpu caches for a zone.
613 *
614 * Arguments:
615 * zone The zone to drain, must be unlocked.
616 *
617 * Returns:
618 * Nothing
619 */
620static void
621cache_drain(uma_zone_t zone)
622{
623 uma_cache_t cache;
624 int cpu;
625
626 /*
627 * We have to lock each cpu cache before locking the zone
628 */
629 for (cpu = 0; cpu <= mp_maxid; cpu++) {
630 if (CPU_ABSENT(cpu))
631 continue;
632 CPU_LOCK(cpu);
633 cache = &zone->uz_cpu[cpu];
634 bucket_drain(zone, cache->uc_allocbucket);
635 bucket_drain(zone, cache->uc_freebucket);
636 if (cache->uc_allocbucket != NULL)
637 bucket_free(cache->uc_allocbucket);
638 if (cache->uc_freebucket != NULL)
639 bucket_free(cache->uc_freebucket);
640 cache->uc_allocbucket = cache->uc_freebucket = NULL;
641 }
642 ZONE_LOCK(zone);
643 bucket_cache_drain(zone);
644 ZONE_UNLOCK(zone);
645 for (cpu = 0; cpu <= mp_maxid; cpu++) {
646 if (CPU_ABSENT(cpu))
647 continue;
648 CPU_UNLOCK(cpu);
649 }
650}
651
652/*
653 * Drain the cached buckets from a zone. Expects a locked zone on entry.
654 */
655static void
656bucket_cache_drain(uma_zone_t zone)
657{
658 uma_bucket_t bucket;
659
660 /*
661 * Drain the bucket queues and free the buckets, we just keep two per
662 * cpu (alloc/free).
663 */
664 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
665 LIST_REMOVE(bucket, ub_link);
666 ZONE_UNLOCK(zone);
667 bucket_drain(zone, bucket);
668 bucket_free(bucket);
669 ZONE_LOCK(zone);
670 }
671
672 /* Now we do the free queue.. */
673 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
674 LIST_REMOVE(bucket, ub_link);
675 bucket_free(bucket);
676 }
677}
678
679/*
680 * Frees pages from a zone back to the system. This is done on demand from
681 * the pageout daemon.
682 *
683 * Arguments:
684 * zone The zone to free pages from
686 *
687 * Returns:
688 * Nothing.
689 */
690static void
691zone_drain(uma_zone_t zone)
692{
693 struct slabhead freeslabs = {};
694 uma_keg_t keg;
695 uma_slab_t slab;
696 uma_slab_t n;
697 u_int8_t flags;
698 u_int8_t *mem;
699 int i;
700
701 keg = zone->uz_keg;
702
703 /*
704 * We don't want to take pages from statically allocated zones at this
705 * time
706 */
707 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
708 return;
709
710 ZONE_LOCK(zone);
711
712#ifdef UMA_DEBUG
713 printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
714#endif
715 bucket_cache_drain(zone);
716 if (keg->uk_free == 0)
717 goto finished;
718
719 slab = LIST_FIRST(&keg->uk_free_slab);
720 while (slab) {
721 n = LIST_NEXT(slab, us_link);
722
 723 /* We have nowhere to free these to */
724 if (slab->us_flags & UMA_SLAB_BOOT) {
725 slab = n;
726 continue;
727 }
728
729 LIST_REMOVE(slab, us_link);
730 keg->uk_pages -= keg->uk_ppera;
731 keg->uk_free -= keg->uk_ipers;
732
733 if (keg->uk_flags & UMA_ZONE_HASH)
734 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
735
736 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
737
738 slab = n;
739 }
740finished:
741 ZONE_UNLOCK(zone);
742
743 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
744 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
745 if (keg->uk_fini)
746 for (i = 0; i < keg->uk_ipers; i++)
747 keg->uk_fini(
748 slab->us_data + (keg->uk_rsize * i),
749 keg->uk_size);
750 flags = slab->us_flags;
751 mem = slab->us_data;
752
753 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
754 (keg->uk_flags & UMA_ZONE_REFCNT)) {
755 vm_object_t obj;
756
757 if (flags & UMA_SLAB_KMEM)
758 obj = kmem_object;
759 else
760 obj = NULL;
761 for (i = 0; i < keg->uk_ppera; i++)
762 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
763 obj);
764 }
765 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
766 uma_zfree_internal(keg->uk_slabzone, slab, NULL,
767 SKIP_NONE);
768#ifdef UMA_DEBUG
769 printf("%s: Returning %d bytes.\n",
770 zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
771#endif
772 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
773 }
774}
775
776/*
777 * Allocate a new slab for a zone. This does not insert the slab onto a list.
778 *
779 * Arguments:
780 * zone The zone to allocate slabs for
781 * wait Shall we wait?
782 *
783 * Returns:
784 * The slab that was allocated or NULL if there is no memory and the
785 * caller specified M_NOWAIT.
786 */
787static uma_slab_t
788slab_zalloc(uma_zone_t zone, int wait)
789{
790 uma_slabrefcnt_t slabref;
791 uma_slab_t slab;
792 uma_keg_t keg;
793 u_int8_t *mem;
794 u_int8_t flags;
795 int i;
796
797 slab = NULL;
798 keg = zone->uz_keg;
799
800#ifdef UMA_DEBUG
801 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
802#endif
803 ZONE_UNLOCK(zone);
804
805 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
806 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
807 if (slab == NULL) {
808 ZONE_LOCK(zone);
 809 return (NULL);
810 }
811 }
812
813 /*
814 * This reproduces the old vm_zone behavior of zero filling pages the
815 * first time they are added to a zone.
816 *
817 * Malloced items are zeroed in uma_zalloc.
818 */
819
820 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
821 wait |= M_ZERO;
822 else
823 wait &= ~M_ZERO;
824
825 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
826 &flags, wait);
827 if (mem == NULL) {
828 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 829 uma_zfree_internal(keg->uk_slabzone, slab, NULL, SKIP_NONE);
830 ZONE_LOCK(zone);
831 return (NULL);
832 }
833
834 /* Point the slab into the allocated memory */
835 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
836 slab = (uma_slab_t )(mem + keg->uk_pgoff);
837
838 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
839 (keg->uk_flags & UMA_ZONE_REFCNT))
840 for (i = 0; i < keg->uk_ppera; i++)
841 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
842
843 slab->us_keg = keg;
844 slab->us_data = mem;
845 slab->us_freecount = keg->uk_ipers;
846 slab->us_firstfree = 0;
847 slab->us_flags = flags;
848
849 if (keg->uk_flags & UMA_ZONE_REFCNT) {
850 slabref = (uma_slabrefcnt_t)slab;
851 for (i = 0; i < keg->uk_ipers; i++) {
852 slabref->us_freelist[i].us_refcnt = 0;
853 slabref->us_freelist[i].us_item = i+1;
854 }
855 } else {
856 for (i = 0; i < keg->uk_ipers; i++)
857 slab->us_freelist[i].us_item = i+1;
858 }
859
860 if (keg->uk_init != NULL) {
861 for (i = 0; i < keg->uk_ipers; i++)
862 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
863 keg->uk_size, wait) != 0)
864 break;
865 if (i != keg->uk_ipers) {
866 if (keg->uk_fini != NULL) {
867 for (i--; i > -1; i--)
868 keg->uk_fini(slab->us_data +
869 (keg->uk_rsize * i),
870 keg->uk_size);
871 }
872 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
873 (keg->uk_flags & UMA_ZONE_REFCNT))
874 for (i = 0; i < keg->uk_ppera; i++)
875 vsetobj((vm_offset_t)mem +
876 (i * PAGE_SIZE), NULL);
877 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
878 uma_zfree_internal(keg->uk_slabzone, slab,
879 NULL, SKIP_NONE);
880 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
881 flags);
882 ZONE_LOCK(zone);
883 return (NULL);
884 }
885 }
886 ZONE_LOCK(zone);
887
888 if (keg->uk_flags & UMA_ZONE_HASH)
889 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
890
891 keg->uk_pages += keg->uk_ppera;
892 keg->uk_free += keg->uk_ipers;
893
894 return (slab);
895}
896
897/*
898 * This function is intended to be used early on in place of page_alloc() so
899 * that we may use the boot time page cache to satisfy allocations before
900 * the VM is ready.
901 */
902static void *
903startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
904{
905 uma_keg_t keg;
906
907 keg = zone->uz_keg;
908
909 /*
910 * Check our small startup cache to see if it has pages remaining.
911 */
912 mtx_lock(&uma_mtx);
913 if (uma_boot_free != 0) {
914 uma_slab_t tmps;
915
916 tmps = LIST_FIRST(&uma_boot_pages);
917 LIST_REMOVE(tmps, us_link);
918 uma_boot_free--;
919 mtx_unlock(&uma_mtx);
920 *pflag = tmps->us_flags;
921 return (tmps->us_data);
922 }
923 mtx_unlock(&uma_mtx);
924 if (booted == 0)
925 panic("UMA: Increase UMA_BOOT_PAGES");
926 /*
927 * Now that we've booted reset these users to their real allocator.
928 */
929#ifdef UMA_MD_SMALL_ALLOC
930 keg->uk_allocf = uma_small_alloc;
931#else
932 keg->uk_allocf = page_alloc;
933#endif
934 return keg->uk_allocf(zone, bytes, pflag, wait);
935}
936
937/*
938 * Allocates a number of pages from the system
939 *
940 * Arguments:
941 * zone Unused
942 * bytes The number of bytes requested
943 * wait Shall we wait?
944 *
945 * Returns:
946 * A pointer to the alloced memory or possibly
947 * NULL if M_NOWAIT is set.
948 */
949static void *
950page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
951{
952 void *p; /* Returned page */
953
954 *pflag = UMA_SLAB_KMEM;
955 p = (void *) kmem_malloc(kmem_map, bytes, wait);
956
957 return (p);
958}
959
960/*
961 * Allocates a number of pages from within an object
962 *
963 * Arguments:
964 * zone Unused
965 * bytes The number of bytes requested
966 * wait Shall we wait?
967 *
968 * Returns:
969 * A pointer to the alloced memory or possibly
970 * NULL if M_NOWAIT is set.
971 */
972static void *
973obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
974{
975 vm_object_t object;
976 vm_offset_t retkva, zkva;
977 vm_page_t p;
978 int pages, startpages;
979
980 object = zone->uz_keg->uk_obj;
981 retkva = 0;
982
983 /*
984 * This looks a little weird since we're getting one page at a time.
985 */
986 VM_OBJECT_LOCK(object);
987 p = TAILQ_LAST(&object->memq, pglist);
988 pages = p != NULL ? p->pindex + 1 : 0;
989 startpages = pages;
990 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
991 for (; bytes > 0; bytes -= PAGE_SIZE) {
992 p = vm_page_alloc(object, pages,
993 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
994 if (p == NULL) {
995 if (pages != startpages)
996 pmap_qremove(retkva, pages - startpages);
997 while (pages != startpages) {
998 pages--;
999 p = TAILQ_LAST(&object->memq, pglist);
1000 vm_page_lock_queues();
1001 vm_page_unwire(p, 0);
1002 vm_page_free(p);
1003 vm_page_unlock_queues();
1004 }
1005 retkva = 0;
1006 goto done;
1007 }
1008 pmap_qenter(zkva, &p, 1);
1009 if (retkva == 0)
1010 retkva = zkva;
1011 zkva += PAGE_SIZE;
1012 pages += 1;
1013 }
1014done:
1015 VM_OBJECT_UNLOCK(object);
1016 *flags = UMA_SLAB_PRIV;
1017
1018 return ((void *)retkva);
1019}
1020
1021/*
1022 * Frees a number of pages to the system
1023 *
1024 * Arguments:
1025 * mem A pointer to the memory to be freed
1026 * size The size of the memory being freed
1027 * flags The original p->us_flags field
1028 *
1029 * Returns:
1030 * Nothing
1031 */
1032static void
1033page_free(void *mem, int size, u_int8_t flags)
1034{
1035 vm_map_t map;
1036
1037 if (flags & UMA_SLAB_KMEM)
1038 map = kmem_map;
1039 else
1040 panic("UMA: page_free used with invalid flags %d\n", flags);
1041
1042 kmem_free(map, (vm_offset_t)mem, size);
1043}
1044
1045/*
1046 * Zero fill initializer
1047 *
1048 * Arguments/Returns follow uma_init specifications
1049 */
1050static int
1051zero_init(void *mem, int size, int flags)
1052{
1053 bzero(mem, size);
1054 return (0);
1055}
1056
1057/*
1058 * Finish creating a small uma zone. This calculates ipers, and the zone size.
1059 *
1060 * Arguments
1061 * zone The zone we should initialize
1062 *
1063 * Returns
1064 * Nothing
1065 */
1066static void
1067zone_small_init(uma_zone_t zone)
1068{
1069 uma_keg_t keg;
1070 u_int rsize;
1071 u_int memused;
1072 u_int wastedspace;
1073 u_int shsize;
1074
1075 keg = zone->uz_keg;
1076 KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
1077 rsize = keg->uk_size;
1078
1079 if (rsize < UMA_SMALLEST_UNIT)
1080 rsize = UMA_SMALLEST_UNIT;
1081 if (rsize & keg->uk_align)
1082 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1083
1084 keg->uk_rsize = rsize;
1085 keg->uk_ppera = 1;
1086
1087 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1088 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
1089 shsize = sizeof(struct uma_slab_refcnt);
1090 } else {
1091 rsize += UMA_FRITM_SZ; /* Account for linkage */
1092 shsize = sizeof(struct uma_slab);
1093 }
1094
1095 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1096 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
1097 memused = keg->uk_ipers * rsize + shsize;
1098 wastedspace = UMA_SLAB_SIZE - memused;
1099
1100 /*
 1101 * We can't do OFFPAGE if we're internal or if we've been
 1102 * asked not to go to the VM for buckets.  Doing so could
 1103 * send us to the VM (kmem_map) for slabs, which is
 1104 * forbidden when the keg is UMA_ZFLAG_CACHEONLY (e.g. as
 1105 * a result of UMA_ZONE_VM).
1106 */
1107 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1108 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1109 return;
1110
1111 if ((wastedspace >= UMA_MAX_WASTE) &&
1112 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1113 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1114 KASSERT(keg->uk_ipers <= 255,
1115 ("zone_small_init: keg->uk_ipers too high!"));
1116#ifdef UMA_DEBUG
1117 printf("UMA decided we need offpage slab headers for "
1118 "zone: %s, calculated wastedspace = %d, "
1119 "maximum wasted space allowed = %d, "
1120 "calculated ipers = %d, "
1121 "new wasted space = %d\n", zone->uz_name, wastedspace,
1122 UMA_MAX_WASTE, keg->uk_ipers,
1123 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1124#endif
1125 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1126 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1127 keg->uk_flags |= UMA_ZONE_HASH;
1128 }
1129}
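
/*
 * Worked example (editorial sketch; assumes UMA_SLAB_SIZE == 4096 and,
 * hypothetically, a 1-byte UMA_FRITM_SZ and a 32-byte slab header): a
 * 256-byte zone keeps uk_rsize == 256 and gets
 *
 *	ipers       = (4096 - 32) / (256 + 1) = 15
 *	memused     = 15 * 257 + 32           = 3887
 *	wastedspace = 4096 - 3887             = 209
 *
 * If 209 >= UMA_MAX_WASTE, the keg is switched to an OFFPAGE header,
 * which raises ipers to 4096 / 256 == 16 items per slab.
 */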
1130
1131/*
1132 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do
1133 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1134 * more complicated.
1135 *
1136 * Arguments
1137 * zone The zone we should initialize
1138 *
1139 * Returns
1140 * Nothing
1141 */
1142static void
1143zone_large_init(uma_zone_t zone)
1144{
1145 uma_keg_t keg;
1146 int pages;
1147
1148 keg = zone->uz_keg;
1149
1150 KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
1151 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1152 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1153
1154 pages = keg->uk_size / UMA_SLAB_SIZE;
1155
1156 /* Account for remainder */
1157 if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1158 pages++;
1159
1160 keg->uk_ppera = pages;
1161 keg->uk_ipers = 1;
1162
1163 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1164 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1165 keg->uk_flags |= UMA_ZONE_HASH;
1166
1167 keg->uk_rsize = keg->uk_size;
1168}
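
/*
 * E.g. (editorial): with 4KB slabs, a 9KB item gives pages = 2 from
 * the integer division, bumped to 3 for the 1KB remainder, so the keg
 * ends up with uk_ppera == 3, a single item per slab, and an OFFPAGE
 * header.
 */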
1169
1170/*
1171 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1172 * the keg onto the global keg list.
1173 *
1174 * Arguments/Returns follow uma_ctor specifications
1175 * udata Actually uma_kctor_args
1176 */
1177static int
1178keg_ctor(void *mem, int size, void *udata, int flags)
1179{
1180 struct uma_kctor_args *arg = udata;
1181 uma_keg_t keg = mem;
1182 uma_zone_t zone;
1183
1184 bzero(keg, size);
1185 keg->uk_size = arg->size;
1186 keg->uk_init = arg->uminit;
1187 keg->uk_fini = arg->fini;
1188 keg->uk_align = arg->align;
1189 keg->uk_free = 0;
1190 keg->uk_pages = 0;
1191 keg->uk_flags = arg->flags;
1192 keg->uk_allocf = page_alloc;
1193 keg->uk_freef = page_free;
1194 keg->uk_recurse = 0;
1195 keg->uk_slabzone = NULL;
1196
1197 /*
1198 * The master zone is passed to us at keg-creation time.
1199 */
1200 zone = arg->zone;
1201 zone->uz_keg = keg;
1202
1203 if (arg->flags & UMA_ZONE_VM)
1204 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1205
1206 if (arg->flags & UMA_ZONE_ZINIT)
1207 keg->uk_init = zero_init;
1208
1209 /*
1210 * The +UMA_FRITM_SZ added to uk_size is to account for the
1211 * linkage that is added to the size in zone_small_init(). If
1212 * we don't account for this here then we may end up in
1213 * zone_small_init() with a calculated 'ipers' of 0.
1214 */
1215 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1216 if ((keg->uk_size+UMA_FRITMREF_SZ) >
1217 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1218 zone_large_init(zone);
1219 else
1220 zone_small_init(zone);
1221 } else {
1222 if ((keg->uk_size+UMA_FRITM_SZ) >
1223 (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1224 zone_large_init(zone);
1225 else
1226 zone_small_init(zone);
1227 }
1228
1229 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1230 if (keg->uk_flags & UMA_ZONE_REFCNT)
1231 keg->uk_slabzone = slabrefzone;
1232 else
1233 keg->uk_slabzone = slabzone;
1234 }
1235
1236 /*
1237 * If we haven't booted yet we need allocations to go through the
1238 * startup cache until the vm is ready.
1239 */
1240 if (keg->uk_ppera == 1) {
1241#ifdef UMA_MD_SMALL_ALLOC
1242 keg->uk_allocf = uma_small_alloc;
1243 keg->uk_freef = uma_small_free;
1244#endif
1245 if (booted == 0)
1246 keg->uk_allocf = startup_alloc;
1247 }
1248
1249 /*
1250 * Initialize keg's lock (shared among zones) through
1251 * Master zone
1252 */
1253 zone->uz_lock = &keg->uk_lock;
1254 if (arg->flags & UMA_ZONE_MTXCLASS)
1255 ZONE_LOCK_INIT(zone, 1);
1256 else
1257 ZONE_LOCK_INIT(zone, 0);
1258
1259 /*
1260 * If we're putting the slab header in the actual page we need to
1261 * figure out where in each page it goes. This calculates a right
1262 * justified offset into the memory on an ALIGN_PTR boundary.
1263 */
1264 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1265 u_int totsize;
1266
1267 /* Size of the slab struct and free list */
1268 if (keg->uk_flags & UMA_ZONE_REFCNT)
1269 totsize = sizeof(struct uma_slab_refcnt) +
1270 keg->uk_ipers * UMA_FRITMREF_SZ;
1271 else
1272 totsize = sizeof(struct uma_slab) +
1273 keg->uk_ipers * UMA_FRITM_SZ;
1274
1275 if (totsize & UMA_ALIGN_PTR)
1276 totsize = (totsize & ~UMA_ALIGN_PTR) +
1277 (UMA_ALIGN_PTR + 1);
1278 keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
1279
1280 if (keg->uk_flags & UMA_ZONE_REFCNT)
1281 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1282 + keg->uk_ipers * UMA_FRITMREF_SZ;
1283 else
1284 totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1285 + keg->uk_ipers * UMA_FRITM_SZ;
1286
1287 /*
1288 * The only way the following is possible is if with our
1289 * UMA_ALIGN_PTR adjustments we are now bigger than
1290 * UMA_SLAB_SIZE. I haven't checked whether this is
1291 * mathematically possible for all cases, so we make
1292 * sure here anyway.
1293 */
1294 if (totsize > UMA_SLAB_SIZE) {
1295 printf("zone %s ipers %d rsize %d size %d\n",
1296 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1297 keg->uk_size);
1298 panic("UMA slab won't fit.\n");
1299 }
1300 }
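/*
 * Editorial illustration with hypothetical sizes (4KB slab, a
 * 32-byte struct uma_slab, 1-byte freelist items): a keg with
 * uk_ipers == 16 needs totsize = 32 + 16 * 1 = 48 bytes, which is
 * already pointer-aligned, so uk_pgoff = 4096 - 48 = 4048 and the
 * header sits right-justified at the end of the slab.
 */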
1301
1302 if (keg->uk_flags & UMA_ZONE_HASH)
1303 hash_alloc(&keg->uk_hash);
1304
1305#ifdef UMA_DEBUG
1306 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1307 zone->uz_name, zone,
1308 keg->uk_size, keg->uk_ipers,
1309 keg->uk_ppera, keg->uk_pgoff);
1310#endif
1311
1312 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1313
1314 mtx_lock(&uma_mtx);
1315 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1316 mtx_unlock(&uma_mtx);
1317 return (0);
1318}
1319
1320/*
1321 * Zone header ctor. This initializes all fields, locks, etc.
1322 *
1323 * Arguments/Returns follow uma_ctor specifications
1324 * udata Actually uma_zctor_args
1325 */
1326
1327static int
1328zone_ctor(void *mem, int size, void *udata, int flags)
1329{
1330 struct uma_zctor_args *arg = udata;
1331 uma_zone_t zone = mem;
1332 uma_zone_t z;
1333 uma_keg_t keg;
1334
1335 bzero(zone, size);
1336 zone->uz_name = arg->name;
1337 zone->uz_ctor = arg->ctor;
1338 zone->uz_dtor = arg->dtor;
1339 zone->uz_init = NULL;
1340 zone->uz_fini = NULL;
1341 zone->uz_allocs = 0;
1342 zone->uz_fills = zone->uz_count = 0;
1343
1344 if (arg->flags & UMA_ZONE_SECONDARY) {
1345 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1346 keg = arg->keg;
1347 zone->uz_keg = keg;
1348 zone->uz_init = arg->uminit;
1349 zone->uz_fini = arg->fini;
1350 zone->uz_lock = &keg->uk_lock;
1351 mtx_lock(&uma_mtx);
1352 ZONE_LOCK(zone);
1353 keg->uk_flags |= UMA_ZONE_SECONDARY;
1354 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1355 if (LIST_NEXT(z, uz_link) == NULL) {
1356 LIST_INSERT_AFTER(z, zone, uz_link);
1357 break;
1358 }
1359 }
1360 ZONE_UNLOCK(zone);
1361 mtx_unlock(&uma_mtx);
1362 } else if (arg->keg == NULL) {
1363 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1364 arg->align, arg->flags) == NULL)
1365 return (ENOMEM);
1366 } else {
1367 struct uma_kctor_args karg;
1368 int error;
1369
1370 /* We should only be here from uma_startup() */
1371 karg.size = arg->size;
1372 karg.uminit = arg->uminit;
1373 karg.fini = arg->fini;
1374 karg.align = arg->align;
1375 karg.flags = arg->flags;
1376 karg.zone = zone;
1377 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1378 flags);
1379 if (error)
1380 return (error);
1381 }
1382 keg = zone->uz_keg;
1383 zone->uz_lock = &keg->uk_lock;
1384
1385 /*
1386 * Some internal zones don't have room allocated for the per cpu
1387 * caches. If we're internal, bail out here.
1388 */
1389 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1390 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
1391 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1392 return (0);
1393 }
1394
1395 if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1396 zone->uz_count = BUCKET_MAX;
1397 else if (keg->uk_ipers <= BUCKET_MAX)
1398 zone->uz_count = keg->uk_ipers;
1399 else
1400 zone->uz_count = BUCKET_MAX;
1401 return (0);
1402}
1403
1404/*
1405 * Keg header dtor. This frees all data, destroys locks, frees the hash
1406 * table and removes the keg from the global list.
1407 *
1408 * Arguments/Returns follow uma_dtor specifications
1409 * udata unused
1410 */
1411static void
1412keg_dtor(void *arg, int size, void *udata)
1413{
1414 uma_keg_t keg;
1415
1416 keg = (uma_keg_t)arg;
1417 mtx_lock(&keg->uk_lock);
1418 if (keg->uk_free != 0) {
1419 printf("Freed UMA keg was not empty (%d items). "
1420 " Lost %d pages of memory.\n",
1421 keg->uk_free, keg->uk_pages);
1422 }
1423 mtx_unlock(&keg->uk_lock);
1424
1425 if (keg->uk_flags & UMA_ZONE_HASH)
1426 hash_free(&keg->uk_hash);
1427
1428 mtx_destroy(&keg->uk_lock);
1429}
1430
1431/*
1432 * Zone header dtor.
1433 *
1434 * Arguments/Returns follow uma_dtor specifications
1435 * udata unused
1436 */
1437static void
1438zone_dtor(void *arg, int size, void *udata)
1439{
1440 uma_zone_t zone;
1441 uma_keg_t keg;
1442
1443 zone = (uma_zone_t)arg;
1444 keg = zone->uz_keg;
1445
1446 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
1447 cache_drain(zone);
1448
1449 mtx_lock(&uma_mtx);
1450 zone_drain(zone);
1451 if (keg->uk_flags & UMA_ZONE_SECONDARY) {
1452 LIST_REMOVE(zone, uz_link);
1453 /*
1454 * XXX there are some races here where
1455 * the zone can be drained but zone lock
1456 * released and then refilled before we
 1457 * remove it... we don't care for now
1458 */
1459 ZONE_LOCK(zone);
1460 if (LIST_EMPTY(&keg->uk_zones))
1461 keg->uk_flags &= ~UMA_ZONE_SECONDARY;
1462 ZONE_UNLOCK(zone);
1463 mtx_unlock(&uma_mtx);
1464 } else {
1465 LIST_REMOVE(keg, uk_link);
1466 LIST_REMOVE(zone, uz_link);
1467 mtx_unlock(&uma_mtx);
1468 uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
1469 }
1470 zone->uz_keg = NULL;
1471}
1472
1473/*
1474 * Traverses every zone in the system and calls a callback
1475 *
1476 * Arguments:
1477 * zfunc A pointer to a function which accepts a zone
1478 * as an argument.
1479 *
1480 * Returns:
1481 * Nothing
1482 */
1483static void
1484zone_foreach(void (*zfunc)(uma_zone_t))
1485{
1486 uma_keg_t keg;
1487 uma_zone_t zone;
1488
1489 mtx_lock(&uma_mtx);
1490 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1491 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1492 zfunc(zone);
1493 }
1494 mtx_unlock(&uma_mtx);
1495}
1496
1497/* Public functions */
1498/* See uma.h */
1499void
1500uma_startup(void *bootmem)
1501{
1502 struct uma_zctor_args args;
1503 uma_slab_t slab;
1504 u_int slabsize;
1505 u_int objsize, totsize, wsize;
1506 int i;
1507
1508#ifdef UMA_DEBUG
1509 printf("Creating uma keg headers zone and keg.\n");
1510#endif
1511 /*
1512 * The general UMA lock is a recursion-allowed lock because
1513 * there is a code path where, while we're still configured
1514 * to use startup_alloc() for backend page allocations, we
1515 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
1516 * which grabs uma_mtx, only to later call into startup_alloc()
1517 * because while freeing we needed to allocate a bucket. Since
1518 * startup_alloc() also takes uma_mtx, we need to be able to
1519 * recurse on it.
1520 */
1521 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
1522
1523 /*
1524 * Figure out the maximum number of items-per-slab we'll have if
1525 * we're using the OFFPAGE slab header to track free items, given
1526 * all possible object sizes and the maximum desired wastage
1527 * (UMA_MAX_WASTE).
1528 *
1529 * We iterate until we find an object size for
1530 * which the calculated wastage in zone_small_init() will be
1531 * enough to warrant OFFPAGE. Since wastedspace versus objsize
1532 * is an overall increasing see-saw function, we find the smallest
1533 * objsize such that the wastage is always acceptable for objects
1534 * with that objsize or smaller. Since a smaller objsize always
1535 * generates a larger possible uma_max_ipers, we use this computed
1536 * objsize to calculate the largest ipers possible. Since the
1537 * ipers calculated for OFFPAGE slab headers is always larger than
1538 * the ipers initially calculated in zone_small_init(), we use
1539 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1540 * obtain the maximum ipers possible for offpage slab headers.
1541 *
 1542 * It should be noted that ipers versus objsize is an inversely
1543 * proportional function which drops off rather quickly so as
1544 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1545 * falls into the portion of the inverse relation AFTER the steep
1546 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1547 *
1548 * Note that we have 8-bits (1 byte) to use as a freelist index
1549 * inside the actual slab header itself and this is enough to
 1550 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
1551 * object with offpage slab header would have ipers =
1552 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1553 * 1 greater than what our byte-integer freelist index can
 1554 * accommodate, but we know that this situation never occurs as
1555 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1556 * that we need to go to offpage slab headers. Or, if we do,
1557 * then we trap that condition below and panic in the INVARIANTS case.
1558 */
1559 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1560 totsize = wsize;
1561 objsize = UMA_SMALLEST_UNIT;
1562 while (totsize >= wsize) {
1563 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1564 (objsize + UMA_FRITM_SZ);
1565 totsize *= (UMA_FRITM_SZ + objsize);
1566 objsize++;
1567 }
1568 if (objsize > UMA_SMALLEST_UNIT)
1569 objsize--;
1570 uma_max_ipers = UMA_SLAB_SIZE / objsize;
1571
1572 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1573 totsize = wsize;
1574 objsize = UMA_SMALLEST_UNIT;
1575 while (totsize >= wsize) {
1576 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1577 (objsize + UMA_FRITMREF_SZ);
1578 totsize *= (UMA_FRITMREF_SZ + objsize);
1579 objsize++;
1580 }
1581 if (objsize > UMA_SMALLEST_UNIT)
1582 objsize--;
1583 uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
1584
1585 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1586 ("uma_startup: calculated uma_max_ipers values too large!"));
1587
1588#ifdef UMA_DEBUG
1589 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1590 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1591 uma_max_ipers_ref);
1592#endif
1593
1594 /* "manually" create the initial zone */
1595 args.name = "UMA Kegs";
1596 args.size = sizeof(struct uma_keg);
1597 args.ctor = keg_ctor;
1598 args.dtor = keg_dtor;
1599 args.uminit = zero_init;
1600 args.fini = NULL;
1601 args.keg = &masterkeg;
1602 args.align = 32 - 1;
1603 args.flags = UMA_ZFLAG_INTERNAL;
 1604 /* The initial zone has no per-cpu queues so it's smaller */
1605 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1606
1607#ifdef UMA_DEBUG
1608 printf("Filling boot free list.\n");
1609#endif
1610 for (i = 0; i < UMA_BOOT_PAGES; i++) {
1611 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1612 slab->us_data = (u_int8_t *)slab;
1613 slab->us_flags = UMA_SLAB_BOOT;
1614 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1615 uma_boot_free++;
1616 }
1617
1618#ifdef UMA_DEBUG
1619 printf("Creating uma zone headers zone and keg.\n");
1620#endif
1621 args.name = "UMA Zones";
1622 args.size = sizeof(struct uma_zone) +
1623 (sizeof(struct uma_cache) * (mp_maxid + 1));
1624 args.ctor = zone_ctor;
1625 args.dtor = zone_dtor;
1626 args.uminit = zero_init;
1627 args.fini = NULL;
1628 args.keg = NULL;
1629 args.align = 32 - 1;
1630 args.flags = UMA_ZFLAG_INTERNAL;
 1631 /* The initial zone has no per-cpu queues so it's smaller */
1632 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1633
1634#ifdef UMA_DEBUG
1635 printf("Initializing pcpu cache locks.\n");
1636#endif
1637 /* Initialize the pcpu cache lock set once and for all */
1638 for (i = 0; i <= mp_maxid; i++)
1639 CPU_LOCK_INIT(i);
1640
1641#ifdef UMA_DEBUG
1642 printf("Creating slab and hash zones.\n");
1643#endif
1644
1645 /*
1646 * This is the max number of free list items we'll have with
1647 * offpage slabs.
1648 */
1649 slabsize = uma_max_ipers * UMA_FRITM_SZ;
1650 slabsize += sizeof(struct uma_slab);
1651
1652 /* Now make a zone for slab headers */
1653 slabzone = uma_zcreate("UMA Slabs",
1654 slabsize,
1655 NULL, NULL, NULL, NULL,
1656 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1657
1658 /*
1659 * We also create a zone for the bigger slabs with reference
 1660 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1661 */
1662 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1663 slabsize += sizeof(struct uma_slab_refcnt);
1664 slabrefzone = uma_zcreate("UMA RCntSlabs",
1665 slabsize,
1666 NULL, NULL, NULL, NULL,
1667 UMA_ALIGN_PTR,
1668 UMA_ZFLAG_INTERNAL);
1669
1670 hashzone = uma_zcreate("UMA Hash",
1671 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1672 NULL, NULL, NULL, NULL,
1673 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1674
1675 bucket_init();
1676
1677#ifdef UMA_MD_SMALL_ALLOC
1678 booted = 1;
1679#endif
1680
1681#ifdef UMA_DEBUG
1682 printf("UMA startup complete.\n");
1683#endif
1684}
1685
1686/* see uma.h */
1687void
1688uma_startup2(void)
1689{
1690 booted = 1;
1691 bucket_enable();
1692#ifdef UMA_DEBUG
1693 printf("UMA startup2 complete.\n");
1694#endif
1695}
1696
1697/*
1698 * Initialize our callout handle
1699 *
1700 */
1701
1702static void
1703uma_startup3(void)
1704{
1705#ifdef UMA_DEBUG
1706 printf("Starting callout.\n");
1707#endif
1708 callout_init(&uma_callout, CALLOUT_MPSAFE);
1709 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1710#ifdef UMA_DEBUG
1711 printf("UMA startup3 complete.\n");
1712#endif
1713}
1714
1715static uma_zone_t
1716uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1717 int align, u_int16_t flags)
1718{
1719 struct uma_kctor_args args;
1720
1721 args.size = size;
1722 args.uminit = uminit;
1723 args.fini = fini;
1724 args.align = align;
1725 args.flags = flags;
1726 args.zone = zone;
1727 return (uma_zalloc_internal(kegs, &args, M_WAITOK));
1728}
1729
1730/* See uma.h */
1731uma_zone_t
1732uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1733 uma_init uminit, uma_fini fini, int align, u_int16_t flags)
1734
1735{
1736 struct uma_zctor_args args;
1737
1738 /* This stuff is essential for the zone ctor */
1739 args.name = name;
1740 args.size = size;
1741 args.ctor = ctor;
1742 args.dtor = dtor;
1743 args.uminit = uminit;
1744 args.fini = fini;
1745 args.align = align;
1746 args.flags = flags;
1747 args.keg = NULL;
1748
1749 return (uma_zalloc_internal(zones, &args, M_WAITOK));
1750}
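
/*
 * Typical usage (editorial example; the names are made up):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    foo_ctor, foo_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 * which allocates a zone header from 'zones' and, via zone_ctor() and
 * uma_kcreate(), a fresh keg sized for struct foo.
 */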
1751
1752/* See uma.h */
1753uma_zone_t
1754uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1755 uma_init zinit, uma_fini zfini, uma_zone_t master)
1756{
1757 struct uma_zctor_args args;
1758
1759 args.name = name;
1760 args.size = master->uz_keg->uk_size;
1761 args.ctor = ctor;
1762 args.dtor = dtor;
1763 args.uminit = zinit;
1764 args.fini = zfini;
1765 args.align = master->uz_keg->uk_align;
1766 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
1767 args.keg = master->uz_keg;
1768
1769 return (uma_zalloc_internal(zones, &args, M_WAITOK));
1770}
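
/*
 * Editorial example (hypothetical names): a secondary zone layers its
 * own init/fini on the master's keg, so both zones carve items out of
 * the same slabs:
 *
 *	bar_zone = uma_zsecond_create("bar", bar_ctor, bar_dtor,
 *	    bar_zinit, bar_zfini, foo_zone);
 */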
1771
1772/* See uma.h */
1773void
1774uma_zdestroy(uma_zone_t zone)
1775{
1776 uma_zfree_internal(zones, zone, NULL, SKIP_NONE);
1777}
1778
1779/* See uma.h */
1780void *
1781uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1782{
1783 void *item;
1784 uma_cache_t cache;
1785 uma_bucket_t bucket;
1786 int cpu;
1787 int badness;
1788
1789 /* This is the fast path allocation */
1790#ifdef UMA_DEBUG_ALLOC_1
1791 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1792#endif
1793 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1794 zone->uz_name, flags);
1795
1796 if (!(flags & M_NOWAIT)) {
1797 KASSERT(curthread->td_intr_nesting_level == 0,
1798 ("malloc(M_WAITOK) in interrupt context"));
1799 if (nosleepwithlocks) {
1800#ifdef WITNESS
1801 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1802 NULL,
1803 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
1804 zone->uz_name);
1805#else
1806 badness = 1;
1807#endif
1808 } else {
1809 badness = 0;
1810#ifdef WITNESS
1811 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1812 "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
1813#endif
1814 }
1815 if (badness) {
1816 flags &= ~M_WAITOK;
1817 flags |= M_NOWAIT;
1818 }
1819 }
1820
1821zalloc_restart:
1822 cpu = PCPU_GET(cpuid);
1823 CPU_LOCK(cpu);
1824 cache = &zone->uz_cpu[cpu];
1825
1826zalloc_start:
1827 bucket = cache->uc_allocbucket;
1828
1829 if (bucket) {
1830 if (bucket->ub_cnt > 0) {
1831 bucket->ub_cnt--;
1832 item = bucket->ub_bucket[bucket->ub_cnt];
1833#ifdef INVARIANTS
1834 bucket->ub_bucket[bucket->ub_cnt] = NULL;
1835#endif
1836 KASSERT(item != NULL,
1837 ("uma_zalloc: Bucket pointer mangled."));
1838 cache->uc_allocs++;
1839#ifdef INVARIANTS
1840 ZONE_LOCK(zone);
1841 uma_dbg_alloc(zone, NULL, item);
1842 ZONE_UNLOCK(zone);
1843#endif
1844 CPU_UNLOCK(cpu);
1845 if (zone->uz_ctor != NULL) {
1846 if (zone->uz_ctor(item, zone->uz_keg->uk_size,
1847 udata, flags) != 0) {
1848 uma_zfree_internal(zone, item, udata,
1849 SKIP_DTOR);
1850 return (NULL);
1851 }
1852 }
1853 if (flags & M_ZERO)
1854 bzero(item, zone->uz_keg->uk_size);
1855 return (item);
1856 } else if (cache->uc_freebucket) {
1857 /*
1858 * We have run out of items in our allocbucket.
1859 * See if we can switch with our free bucket.
1860 */
1861 if (cache->uc_freebucket->ub_cnt > 0) {
1862#ifdef UMA_DEBUG_ALLOC
1863 printf("uma_zalloc: Swapping empty with"
1864 " alloc.\n");
1865#endif
1866 bucket = cache->uc_freebucket;
1867 cache->uc_freebucket = cache->uc_allocbucket;
1868 cache->uc_allocbucket = bucket;
1869
1870 goto zalloc_start;
1871 }
1872 }
1873 }
1874 ZONE_LOCK(zone);
1875 /* Since we have locked the zone we may as well send back our stats */
1876 zone->uz_allocs += cache->uc_allocs;
1877 cache->uc_allocs = 0;
1878
1879 /* Our old one is now a free bucket */
1880 if (cache->uc_allocbucket) {
1881 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1882 ("uma_zalloc_arg: Freeing a non free bucket."));
1883 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1884 cache->uc_allocbucket, ub_link);
1885 cache->uc_allocbucket = NULL;
1886 }
1887
1888 /* Check the free list for a new alloc bucket */
1889 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1890 KASSERT(bucket->ub_cnt != 0,
1891 ("uma_zalloc_arg: Returning an empty bucket."));
1892
1893 LIST_REMOVE(bucket, ub_link);
1894 cache->uc_allocbucket = bucket;
1895 ZONE_UNLOCK(zone);
1896 goto zalloc_start;
1897 }
1898 /* We are no longer associated with this cpu!!! */
1899 CPU_UNLOCK(cpu);
1900
1901 /* Bump up our uz_count so we get here less */
1902 if (zone->uz_count < BUCKET_MAX)
1903 zone->uz_count++;
1904
1905 /*
1906 * Now lets just fill a bucket and put it on the free list. If that
 1907 * works we'll restart the allocation from the beginning.
1908 */
1909 if (uma_zalloc_bucket(zone, flags)) {
1910 ZONE_UNLOCK(zone);
1911 goto zalloc_restart;
1912 }
1913 ZONE_UNLOCK(zone);
1914 /*
1915 * We may not be able to get a bucket so return an actual item.
1916 */
1917#ifdef UMA_DEBUG
1918 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1919#endif
1920
1921 return (uma_zalloc_internal(zone, udata, flags));
1922}
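
/*
 * Editorial note: callers normally reach this through the uma_zalloc()
 * wrapper in uma.h, which passes udata == NULL, e.g.
 *
 *	item = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *
 * On a bucket hit the request is serviced entirely from the per-cpu
 * cache, without taking the zone lock.
 */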
1923
1924static uma_slab_t
1925uma_zone_slab(uma_zone_t zone, int flags)
1926{
1927 uma_slab_t slab;
1928 uma_keg_t keg;
1929
1930 keg = zone->uz_keg;
1931
1932 /*
1933 * This is to prevent us from recursively trying to allocate
1934 * buckets. The problem is that if an allocation forces us to
1935 * grab a new bucket we will call page_alloc, which will go off
1936 * and cause the vm to allocate vm_map_entries. If we need new
1937 * buckets there too we will recurse in kmem_alloc and bad
1938	 * things happen.  So instead we return a NULL slab, and make
1939	 * the code that allocates buckets smart enough to deal with it.
1940 */
1941 if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
1942 return (NULL);
1943
1944 slab = NULL;
1945
1946 for (;;) {
1947 /*
1948 * Find a slab with some space. Prefer slabs that are partially
1949 * used over those that are totally full. This helps to reduce
1950 * fragmentation.
1951 */
1952 if (keg->uk_free != 0) {
1953 if (!LIST_EMPTY(&keg->uk_part_slab)) {
1954 slab = LIST_FIRST(&keg->uk_part_slab);
1955 } else {
1956 slab = LIST_FIRST(&keg->uk_free_slab);
1957 LIST_REMOVE(slab, us_link);
1958 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
1959 us_link);
1960 }
1961 return (slab);
1962 }
1963
1964 /*
1965 * M_NOVM means don't ask at all!
1966 */
1967 if (flags & M_NOVM)
1968 break;
1969
1970 if (keg->uk_maxpages &&
1971 keg->uk_pages >= keg->uk_maxpages) {
1972 keg->uk_flags |= UMA_ZFLAG_FULL;
1973
1974 if (flags & M_NOWAIT)
1975 break;
1976 else
1977 msleep(keg, &keg->uk_lock, PVM,
1978 "zonelimit", 0);
1979 continue;
1980 }
1981 keg->uk_recurse++;
1982 slab = slab_zalloc(zone, flags);
1983 keg->uk_recurse--;
1984
1985 /*
1986 * If we got a slab here it's safe to mark it partially used
1987 * and return. We assume that the caller is going to remove
1988 * at least one item.
1989 */
1990 if (slab) {
1991 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
1992 return (slab);
1993 }
1994 /*
1995 * We might not have been able to get a slab but another cpu
1996 * could have while we were unlocked. Check again before we
1997 * fail.
1998 */
1999 if (flags & M_NOWAIT)
2000 flags |= M_NOVM;
2001 }
2002 return (slab);
2003}
2004
2005static void *
2006uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
2007{
2008 uma_keg_t keg;
2009 uma_slabrefcnt_t slabref;
2010 void *item;
2011 u_int8_t freei;
2012
2013 keg = zone->uz_keg;
2014
2015 freei = slab->us_firstfree;
2016 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2017 slabref = (uma_slabrefcnt_t)slab;
2018 slab->us_firstfree = slabref->us_freelist[freei].us_item;
2019 } else {
2020 slab->us_firstfree = slab->us_freelist[freei].us_item;
2021 }
2022 item = slab->us_data + (keg->uk_rsize * freei);
2023
2024 slab->us_freecount--;
2025 keg->uk_free--;
2026#ifdef INVARIANTS
2027 uma_dbg_alloc(zone, slab, item);
2028#endif
2029 /* Move this slab to the full list */
2030 if (slab->us_freecount == 0) {
2031 LIST_REMOVE(slab, us_link);
2032 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2033 }
2034
2035 return (item);
2036}
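
/*
 * Worked example (illustrative): for a fresh slab with uk_ipers == 4,
 * slab_zalloc() leaves us_firstfree == 0 and us_freelist[i].us_item ==
 * i + 1.  Two calls to uma_slab_alloc() then hand out the items at
 * offsets 0 * uk_rsize and 1 * uk_rsize while us_firstfree advances
 * 0 -> 1 -> 2.  Frees (see uma_zfree_internal()) push an index back by
 * storing the old us_firstfree in the freed slot and making the freed
 * index the new head, so the embedded freelist behaves as a stack of
 * item indices.
 */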
2037
2038static int
2039uma_zalloc_bucket(uma_zone_t zone, int flags)
2040{
2041 uma_bucket_t bucket;
2042 uma_slab_t slab;
2043 int16_t saved;
2044 int max, origflags = flags;
2045
2046 /*
2047 * Try this zone's free list first so we don't allocate extra buckets.
2048 */
2049 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2050 KASSERT(bucket->ub_cnt == 0,
2051 ("uma_zalloc_bucket: Bucket on free list is not empty."));
2052 LIST_REMOVE(bucket, ub_link);
2053 } else {
2054 int bflags;
2055
2056 bflags = (flags & ~M_ZERO);
2057 if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2058 bflags |= M_NOVM;
2059
2060 ZONE_UNLOCK(zone);
2061 bucket = bucket_alloc(zone->uz_count, bflags);
2062 ZONE_LOCK(zone);
2063 }
2064
2065 if (bucket == NULL)
2066 return (0);
2067
2068#ifdef SMP
2069 /*
2070 * This code is here to limit the number of simultaneous bucket fills
2071 * for any given zone to the number of per cpu caches in this zone. This
2072 * is done so that we don't allocate more memory than we really need.
2073 */
2074 if (zone->uz_fills >= mp_ncpus)
2075 goto done;
2076
2077#endif
2078 zone->uz_fills++;
2079
2080 max = MIN(bucket->ub_entries, zone->uz_count);
2081 /* Try to keep the buckets totally full */
2082 saved = bucket->ub_cnt;
2083 while (bucket->ub_cnt < max &&
2084 (slab = uma_zone_slab(zone, flags)) != NULL) {
2085 while (slab->us_freecount && bucket->ub_cnt < max) {
2086 bucket->ub_bucket[bucket->ub_cnt++] =
2087 uma_slab_alloc(zone, slab);
2088 }
2089
2090 /* Don't block on the next fill */
2091 flags |= M_NOWAIT;
2092 }
2093
2094 /*
2095 * We unlock here because we need to call the zone's init.
2096 * It should be safe to unlock because the slab dealt with
2097 * above is already on the appropriate list within the keg
2098 * and the bucket we filled is not yet on any list, so we
2099 * own it.
2100 */
2101 if (zone->uz_init != NULL) {
2102 int i;
2103
2104 ZONE_UNLOCK(zone);
2105 for (i = saved; i < bucket->ub_cnt; i++)
2106 if (zone->uz_init(bucket->ub_bucket[i],
2107 zone->uz_keg->uk_size, origflags) != 0)
2108 break;
2109 /*
2110 * If we couldn't initialize the whole bucket, put the
2111 * rest back onto the freelist.
2112 */
2113 if (i != bucket->ub_cnt) {
2114 int j;
2115
2116 for (j = i; j < bucket->ub_cnt; j++) {
2117 uma_zfree_internal(zone, bucket->ub_bucket[j],
2118 NULL, SKIP_FINI);
2119#ifdef INVARIANTS
2120 bucket->ub_bucket[j] = NULL;
2121#endif
2122 }
2123 bucket->ub_cnt = i;
2124 }
2125 ZONE_LOCK(zone);
2126 }
2127
2128 zone->uz_fills--;
2129 if (bucket->ub_cnt != 0) {
2130 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2131 bucket, ub_link);
2132 return (1);
2133 }
2134#ifdef SMP
2135done:
2136#endif
2137 bucket_free(bucket);
2138
2139 return (0);
2140}
2141/*
2142 * Allocates an item for an internal zone
2143 *
2144 * Arguments
2145 * zone The zone to alloc for.
2146 * udata The data to be passed to the constructor.
2147 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2148 *
2149 * Returns
2150 * NULL if there is no memory and M_NOWAIT is set
2151 * An item if successful
2152 */
2153
2154static void *
2155uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
2156{
2157 uma_keg_t keg;
2158 uma_slab_t slab;
2159 void *item;
2160
2161 item = NULL;
2162 keg = zone->uz_keg;
2163
2164#ifdef UMA_DEBUG_ALLOC
2165 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2166#endif
2167 ZONE_LOCK(zone);
2168
2169 slab = uma_zone_slab(zone, flags);
2170 if (slab == NULL) {
2171 ZONE_UNLOCK(zone);
2172 return (NULL);
2173 }
2174
2175 item = uma_slab_alloc(zone, slab);
2176
2177 ZONE_UNLOCK(zone);
2178
2179 /*
2180 * We have to call both the zone's init (not the keg's init)
2181 * and the zone's ctor. This is because the item is going from
2182 * a keg slab directly to the user, and the user is expecting it
2183 * to be both zone-init'd as well as zone-ctor'd.
2184 */
2185 if (zone->uz_init != NULL) {
2186 if (zone->uz_init(item, keg->uk_size, flags) != 0) {
2187 uma_zfree_internal(zone, item, udata, SKIP_FINI);
2188 return (NULL);
2189 }
2190 }
2191 if (zone->uz_ctor != NULL) {
2192 if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
2193 uma_zfree_internal(zone, item, udata, SKIP_DTOR);
2194 return (NULL);
2195 }
2196 }
2197 if (flags & M_ZERO)
2198 bzero(item, keg->uk_size);
2199
2200 return (item);
2201}
2202
2203/* See uma.h */
2204void
2205uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2206{
2207 uma_keg_t keg;
2208 uma_cache_t cache;
2209 uma_bucket_t bucket;
2210 int bflags;
2211 int cpu;
2212 enum zfreeskip skip;
2213
2214 /* This is the fast path free */
2215 skip = SKIP_NONE;
2216 keg = zone->uz_keg;
2217
2218#ifdef UMA_DEBUG_ALLOC_1
2219 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2220#endif
2221 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2222 zone->uz_name);
2223
2224 /*
2225 * The race here is acceptable. If we miss it we'll just have to wait
2226 * a little longer for the limits to be reset.
2227 */
2228
2229 if (keg->uk_flags & UMA_ZFLAG_FULL)
2230 goto zfree_internal;
2231
2232 if (zone->uz_dtor) {
2233 zone->uz_dtor(item, keg->uk_size, udata);
2234 skip = SKIP_DTOR;
2235 }
2236
2237zfree_restart:
2238 cpu = PCPU_GET(cpuid);
2239 CPU_LOCK(cpu);
2240 cache = &zone->uz_cpu[cpu];
2241
2242zfree_start:
2243 bucket = cache->uc_freebucket;
2244
2245 if (bucket) {
2246 /*
2247 * Do we have room in our bucket? It is OK for this uz count
2248 * check to be slightly out of sync.
2249 */
2250
2251 if (bucket->ub_cnt < bucket->ub_entries) {
2252 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2253 ("uma_zfree: Freeing to non free bucket index."));
2254 bucket->ub_bucket[bucket->ub_cnt] = item;
2255 bucket->ub_cnt++;
2256#ifdef INVARIANTS
2257 ZONE_LOCK(zone);
2258 if (keg->uk_flags & UMA_ZONE_MALLOC)
2259 uma_dbg_free(zone, udata, item);
2260 else
2261 uma_dbg_free(zone, NULL, item);
2262 ZONE_UNLOCK(zone);
2263#endif
2264 CPU_UNLOCK(cpu);
2265 return;
2266 } else if (cache->uc_allocbucket) {
2267#ifdef UMA_DEBUG_ALLOC
2268 printf("uma_zfree: Swapping buckets.\n");
2269#endif
2270 /*
2271 * We have run out of space in our freebucket.
2272 * See if we can switch with our alloc bucket.
2273 */
2274 if (cache->uc_allocbucket->ub_cnt <
2275 cache->uc_freebucket->ub_cnt) {
2276 bucket = cache->uc_freebucket;
2277 cache->uc_freebucket = cache->uc_allocbucket;
2278 cache->uc_allocbucket = bucket;
2279 goto zfree_start;
2280 }
2281 }
2282 }
2283 /*
2284 * We can get here for two reasons:
2285 *
2286 * 1) The buckets are NULL
2287 * 2) The alloc and free buckets are both somewhat full.
2288 */
2289
2290 ZONE_LOCK(zone);
2291
2292 bucket = cache->uc_freebucket;
2293 cache->uc_freebucket = NULL;
2294
2295 /* Can we throw this on the zone full list? */
2296 if (bucket != NULL) {
2297#ifdef UMA_DEBUG_ALLOC
2298		printf("uma_zfree: Putting old bucket on the full list.\n");
2299#endif
2300		/* ub_cnt is the number of items in the bucket */
2301		KASSERT(bucket->ub_cnt != 0,
2302		    ("uma_zfree: Attempting to insert an empty bucket onto the full list."));
2303 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2304 bucket, ub_link);
2305 }
2306 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2307 LIST_REMOVE(bucket, ub_link);
2308 ZONE_UNLOCK(zone);
2309 cache->uc_freebucket = bucket;
2310 goto zfree_start;
2311 }
2312 /* We're done with this CPU now */
2313 CPU_UNLOCK(cpu);
2314
2315 /* And the zone.. */
2316 ZONE_UNLOCK(zone);
2317
2318#ifdef UMA_DEBUG_ALLOC
2319 printf("uma_zfree: Allocating new free bucket.\n");
2320#endif
2321 bflags = M_NOWAIT;
2322
2323 if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2324 bflags |= M_NOVM;
2325 bucket = bucket_alloc(zone->uz_count, bflags);
2326 if (bucket) {
2327 ZONE_LOCK(zone);
2328 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2329 bucket, ub_link);
2330 ZONE_UNLOCK(zone);
2331 goto zfree_restart;
2332 }
2333
2334 /*
2335 * If nothing else caught this, we'll just do an internal free.
2336 */
2337
2338zfree_internal:
2339
2340#ifdef INVARIANTS
2341 /*
2342 * If we need to skip the dtor and the uma_dbg_free in
2343 * uma_zfree_internal because we've already called the dtor
2344 * above, but we ended up here, then we need to make sure
2345 * that we take care of the uma_dbg_free immediately.
2346 */
2347	if (skip != SKIP_NONE) {
2348 ZONE_LOCK(zone);
2349 if (keg->uk_flags & UMA_ZONE_MALLOC)
2350 uma_dbg_free(zone, udata, item);
2351 else
2352 uma_dbg_free(zone, NULL, item);
2353 ZONE_UNLOCK(zone);
2354 }
2355#endif
2356 uma_zfree_internal(zone, item, udata, skip);
2357
2358 return;
2359}
2360
2361/*
2362 * Frees an item to an INTERNAL zone or allocates a free bucket
2363 *
2364 * Arguments:
2365 * zone The zone to free to
2366 * item The item we're freeing
2367 * udata User supplied data for the dtor
2368 * skip Skip dtors and finis
2369 */
2370static void
2371uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
2372 enum zfreeskip skip)
2373{
2374 uma_slab_t slab;
2375 uma_slabrefcnt_t slabref;
2376 uma_keg_t keg;
2377 u_int8_t *mem;
2378 u_int8_t freei;
2379
2380 keg = zone->uz_keg;
2381
2382 if (skip < SKIP_DTOR && zone->uz_dtor)
2383 zone->uz_dtor(item, keg->uk_size, udata);
2384 if (skip < SKIP_FINI && zone->uz_fini)
2385 zone->uz_fini(item, keg->uk_size);
2386
2387 ZONE_LOCK(zone);
2388
2389 if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
2390 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2391 if (keg->uk_flags & UMA_ZONE_HASH)
2392 slab = hash_sfind(&keg->uk_hash, mem);
2393 else {
2394 mem += keg->uk_pgoff;
2395 slab = (uma_slab_t)mem;
2396 }
2397 } else {
2398 slab = (uma_slab_t)udata;
2399 }
2400
2401 /* Do we need to remove from any lists? */
2402	if (slab->us_freecount + 1 == keg->uk_ipers) {
2403 LIST_REMOVE(slab, us_link);
2404 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2405 } else if (slab->us_freecount == 0) {
2406 LIST_REMOVE(slab, us_link);
2407 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2408 }
2409
2410 /* Slab management stuff */
2411 freei = ((unsigned long)item - (unsigned long)slab->us_data)
2412 / keg->uk_rsize;
2413
2414#ifdef INVARIANTS
2415	if (skip == SKIP_NONE)
2416 uma_dbg_free(zone, slab, item);
2417#endif
2418
2419 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2420 slabref = (uma_slabrefcnt_t)slab;
2421 slabref->us_freelist[freei].us_item = slab->us_firstfree;
2422 } else {
2423 slab->us_freelist[freei].us_item = slab->us_firstfree;
2424 }
2425 slab->us_firstfree = freei;
2426 slab->us_freecount++;
2427
2428 /* Zone statistics */
2429 keg->uk_free++;
2430
2431 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2432 if (keg->uk_pages < keg->uk_maxpages)
2433 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2434
2435 /* We can handle one more allocation */
2436 wakeup_one(keg);
2437 }
2438
2439 ZONE_UNLOCK(zone);
2440}
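
/*
 * Worked example (illustrative; the addresses are hypothetical and
 * UMA_SLAB_SIZE is assumed to be one 4K page): freeing item 0xc10e3480
 * from a non-MALLOC, non-HASH zone masks the address down to the slab
 * base 0xc10e3000, adds uk_pgoff to locate the slab header at the tail
 * of the page, and recovers the freelist index as
 * (0xc10e3480 - us_data) / uk_rsize.
 */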
2441
2442/* See uma.h */
2443void
2444uma_zone_set_max(uma_zone_t zone, int nitems)
2445{
2446 uma_keg_t keg;
2447
2448 keg = zone->uz_keg;
2449 ZONE_LOCK(zone);
2450 if (keg->uk_ppera > 1)
2451 keg->uk_maxpages = nitems * keg->uk_ppera;
2452 else
2453 keg->uk_maxpages = nitems / keg->uk_ipers;
2454
2455 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2456 keg->uk_maxpages++;
2457
2458 ZONE_UNLOCK(zone);
2459}
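
/*
 * Worked example for the rounding above (illustrative): with
 * uk_ppera == 1 and uk_ipers == 50, uma_zone_set_max(zone, 120) first
 * computes uk_maxpages = 120 / 50 = 2 and then bumps it to 3 because
 * 2 * 50 < 120, so the effective limit rounds up to 150 items rather
 * than truncating to 100.
 */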
2460
2461/* See uma.h */
2462void
2463uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2464{
2465 ZONE_LOCK(zone);
2466 KASSERT(zone->uz_keg->uk_pages == 0,
2467 ("uma_zone_set_init on non-empty keg"));
2468 zone->uz_keg->uk_init = uminit;
2469 ZONE_UNLOCK(zone);
2470}
2471
2472/* See uma.h */
2473void
2474uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2475{
2476 ZONE_LOCK(zone);
2477 KASSERT(zone->uz_keg->uk_pages == 0,
2478 ("uma_zone_set_fini on non-empty keg"));
2479 zone->uz_keg->uk_fini = fini;
2480 ZONE_UNLOCK(zone);
2481}
2482
2483/* See uma.h */
2484void
2485uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2486{
2487 ZONE_LOCK(zone);
2488 KASSERT(zone->uz_keg->uk_pages == 0,
2489 ("uma_zone_set_zinit on non-empty keg"));
2490 zone->uz_init = zinit;
2491 ZONE_UNLOCK(zone);
2492}
2493
2494/* See uma.h */
2495void
2496uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2497{
2498 ZONE_LOCK(zone);
2499 KASSERT(zone->uz_keg->uk_pages == 0,
2500 ("uma_zone_set_zfini on non-empty keg"));
2501 zone->uz_fini = zfini;
2502 ZONE_UNLOCK(zone);
2503}
2504
2505/* See uma.h */
2506/* XXX uk_freef is not actually used with the zone locked */
2507void
2508uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2509{
2510 ZONE_LOCK(zone);
2511 zone->uz_keg->uk_freef = freef;
2512 ZONE_UNLOCK(zone);
2513}
2514
2515/* See uma.h */
2516/* XXX uk_allocf is not actually used with the zone locked */
2517void
2518uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2519{
2520 ZONE_LOCK(zone);
2521 zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2522 zone->uz_keg->uk_allocf = allocf;
2523 ZONE_UNLOCK(zone);
2524}
2525
2526/* See uma.h */
2527int
2528uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2529{
2530 uma_keg_t keg;
2531 vm_offset_t kva;
2532 int pages;
2533
2534 keg = zone->uz_keg;
2535 pages = count / keg->uk_ipers;
2536
2537 if (pages * keg->uk_ipers < count)
2538 pages++;
2539
2540 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2541
2542 if (kva == 0)
2543 return (0);
2544 if (obj == NULL) {
2545 obj = vm_object_allocate(OBJT_DEFAULT,
2546 pages);
2547 } else {
2548 VM_OBJECT_LOCK_INIT(obj, "uma object");
2549 _vm_object_allocate(OBJT_DEFAULT,
2550 pages, obj);
2551 }
2552 ZONE_LOCK(zone);
2553 keg->uk_kva = kva;
2554 keg->uk_obj = obj;
2555 keg->uk_maxpages = pages;
2556 keg->uk_allocf = obj_alloc;
2557 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2558 ZONE_UNLOCK(zone);
2559 return (1);
2560}
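
/*
 * Usage sketch (illustrative; the error handling is hypothetical): a
 * subsystem that wants its zone backed by a private VM object capped at
 * 'cnt' items would do
 *
 *	if (uma_zone_set_obj(zone, NULL, cnt) == 0)
 *		return (ENOMEM);
 *
 * after which every slab for the zone is allocated by obj_alloc() from
 * the reserved kva range.
 */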
2561
2562/* See uma.h */
2563void
2564uma_prealloc(uma_zone_t zone, int items)
2565{
2566 int slabs;
2567 uma_slab_t slab;
2568 uma_keg_t keg;
2569
2570 keg = zone->uz_keg;
2571 ZONE_LOCK(zone);
2572 slabs = items / keg->uk_ipers;
2573 if (slabs * keg->uk_ipers < items)
2574 slabs++;
2575 while (slabs > 0) {
2576		slab = slab_zalloc(zone, M_WAITOK);
		/* slab_zalloc() can still fail (e.g. if an item init fails);
		   don't link a NULL slab. */
		if (slab == NULL)
			break;
2577		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2578 slabs--;
2579 }
2580 ZONE_UNLOCK(zone);
2581}
2582
2583/* See uma.h */
2584u_int32_t *
2585uma_find_refcnt(uma_zone_t zone, void *item)
2586{
2587 uma_slabrefcnt_t slabref;
2588 uma_keg_t keg;
2589 u_int32_t *refcnt;
2590 int idx;
2591
2592 keg = zone->uz_keg;
2593 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
2594 (~UMA_SLAB_MASK));
2595 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
2596 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2597 idx = ((unsigned long)item - (unsigned long)slabref->us_data)
2598 / keg->uk_rsize;
2599 refcnt = &slabref->us_freelist[idx].us_refcnt;
2600	return (refcnt);
2601}
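
/*
 * Usage sketch (illustrative): a consumer of a UMA_ZONE_REFCNT zone can
 * resolve the reference-count slot for an item once and cache it:
 *
 *	u_int32_t *refcnt;
 *
 *	refcnt = uma_find_refcnt(zone, item);
 *	*refcnt = 1;
 *
 * Later increments and decrements then need no further slab lookups.
 */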
2602
2603/* See uma.h */
2604void
2605uma_reclaim(void)
2606{
2607#ifdef UMA_DEBUG
2608 printf("UMA: vm asked us to release pages!\n");
2609#endif
2610 bucket_enable();
2611 zone_foreach(zone_drain);
2612 /*
2613	 * Some slabs may have been freed but this zone will be visited early;
2614	 * we visit it again here so that we can free pages that became empty
2615	 * once the other zones were drained.  We have to do the same for buckets.
2616 */
2617 zone_drain(slabzone);
2618 zone_drain(slabrefzone);
2619 bucket_zone_drain();
2620}
2621
2622void *
2623uma_large_malloc(int size, int wait)
2624{
2625 void *mem;
2626 uma_slab_t slab;
2627 u_int8_t flags;
2628
2629 slab = uma_zalloc_internal(slabzone, NULL, wait);
2630 if (slab == NULL)
2631 return (NULL);
2632 mem = page_alloc(NULL, size, &flags, wait);
2633 if (mem) {
2634 vsetslab((vm_offset_t)mem, slab);
2635 slab->us_data = mem;
2636 slab->us_flags = flags | UMA_SLAB_MALLOC;
2637 slab->us_size = size;
2638 } else {
2639		uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
2640 }
2641
2642 return (mem);
2643}
2644
2645void
2646uma_large_free(uma_slab_t slab)
2647{
2648 vsetobj((vm_offset_t)slab->us_data, kmem_object);
2649 page_free(slab->us_data, slab->us_size, slab->us_flags);
2650	uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
2651}
2652
2653void
2654uma_print_stats(void)
2655{
2656 zone_foreach(uma_print_zone);
2657}
2658
2659static void
2660slab_print(uma_slab_t slab)
2661{
2662 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
2663 slab->us_keg, slab->us_data, slab->us_freecount,
2664 slab->us_firstfree);
2665}
2666
2667static void
2668cache_print(uma_cache_t cache)
2669{
2670 printf("alloc: %p(%d), free: %p(%d)\n",
2671 cache->uc_allocbucket,
2672 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
2673 cache->uc_freebucket,
2674 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
2675}
2676
2677void
2678uma_print_zone(uma_zone_t zone)
2679{
2680 uma_cache_t cache;
2681 uma_keg_t keg;
2682 uma_slab_t slab;
2683 int i;
2684
2685 keg = zone->uz_keg;
2686 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2687 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2688 keg->uk_ipers, keg->uk_ppera,
2689 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
2690 printf("Part slabs:\n");
2691 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
2692 slab_print(slab);
2693 printf("Free slabs:\n");
2694 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
2695 slab_print(slab);
2696 printf("Full slabs:\n");
2697 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
2698 slab_print(slab);
2699 for (i = 0; i <= mp_maxid; i++) {
2700 if (CPU_ABSENT(i))
2701 continue;
2702 cache = &zone->uz_cpu[i];
2703 printf("CPU %d Cache:\n", i);
2704 cache_print(cache);
2705 }
2706}
2707
2708/*
2709 * Sysctl handler for vm.zone
2710 *
2711 * stolen from vm_zone.c
2712 */
2713static int
2714sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2715{
2716 int error, len, cnt;
2717 const int linesize = 128; /* conservative */
2718 int totalfree;
2719 char *tmpbuf, *offset;
2720 uma_zone_t z;
2721 uma_keg_t zk;
2722 char *p;
2723 int cpu;
2724 int cachefree;
2725 uma_bucket_t bucket;
2726 uma_cache_t cache;
2727
2728 cnt = 0;
2729 mtx_lock(&uma_mtx);
2730 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2731 LIST_FOREACH(z, &zk->uk_zones, uz_link)
2732 cnt++;
2733 }
2734 mtx_unlock(&uma_mtx);
2735 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2736 M_TEMP, M_WAITOK);
2737 len = snprintf(tmpbuf, linesize,
2738 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
2739 if (cnt == 0)
2740 tmpbuf[len - 1] = '\0';
2741 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2742 if (error || cnt == 0)
2743 goto out;
2744 offset = tmpbuf;
2745 mtx_lock(&uma_mtx);
2746 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2747 LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2748 if (cnt == 0) /* list may have changed size */
2749 break;
2750 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2751 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2752 if (CPU_ABSENT(cpu))
2753 continue;
2754 CPU_LOCK(cpu);
2755 }
2756 }
2757 ZONE_LOCK(z);
2758 cachefree = 0;
2759 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2760 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2761 if (CPU_ABSENT(cpu))
2762 continue;
2763 cache = &z->uz_cpu[cpu];
2764 if (cache->uc_allocbucket != NULL)
2765 cachefree += cache->uc_allocbucket->ub_cnt;
2766 if (cache->uc_freebucket != NULL)
2767 cachefree += cache->uc_freebucket->ub_cnt;
2768 CPU_UNLOCK(cpu);
2769 }
2770 }
2771 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2772 cachefree += bucket->ub_cnt;
2773 }
2774 totalfree = zk->uk_free + cachefree;
2775 len = snprintf(offset, linesize,
2776 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2777 z->uz_name, zk->uk_size,
2778 zk->uk_maxpages * zk->uk_ipers,
2779 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2780 totalfree,
2781 (unsigned long long)z->uz_allocs);
2782 ZONE_UNLOCK(z);
2783 for (p = offset + 12; p > offset && *p == ' '; --p)
2784 /* nothing */ ;
2785 p[1] = ':';
2786 cnt--;
2787 offset += len;
2788 }
2789 }
2790 mtx_unlock(&uma_mtx);
2791 *offset++ = '\0';
2792 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2793out:
2794 FREE(tmpbuf, M_TEMP);
2795 return (error);
2796}
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59
60#include "opt_param.h"
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/kernel.h>
64#include <sys/types.h>
65#include <sys/queue.h>
66#include <sys/malloc.h>
67#include <sys/ktr.h>
68#include <sys/lock.h>
69#include <sys/sysctl.h>
70#include <sys/mutex.h>
71#include <sys/proc.h>
72#include <sys/smp.h>
73#include <sys/vmmeter.h>
74
75#include <vm/vm.h>
76#include <vm/vm_object.h>
77#include <vm/vm_page.h>
78#include <vm/vm_param.h>
79#include <vm/vm_map.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_extern.h>
82#include <vm/uma.h>
83#include <vm/uma_int.h>
84#include <vm/uma_dbg.h>
85
86#include <machine/vmparam.h>
87
88/*
89 * This is the zone and keg from which all zones are spawned. The idea is that
90 * even the zone & keg heads are allocated from the allocator, so we use the
91 * bss section to bootstrap us.
92 */
93static struct uma_keg masterkeg;
94static struct uma_zone masterzone_k;
95static struct uma_zone masterzone_z;
96static uma_zone_t kegs = &masterzone_k;
97static uma_zone_t zones = &masterzone_z;
98
99/* This is the zone from which all of uma_slab_t's are allocated. */
100static uma_zone_t slabzone;
101static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
102
103/*
104 * The initial hash tables come out of this zone so they can be allocated
105 * prior to malloc coming up.
106 */
107static uma_zone_t hashzone;
108
109static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
110
111/*
112 * Are we allowed to allocate buckets?
113 */
114static int bucketdisable = 1;
115
116/* Linked list of all kegs in the system */
117static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
118
119/* This mutex protects the keg list */
120static struct mtx uma_mtx;
121
122/* These are the pcpu cache locks */
123static struct mtx uma_pcpu_mtx[MAXCPU];
124
125/* Linked list of boot time pages */
126static LIST_HEAD(,uma_slab) uma_boot_pages =
127 LIST_HEAD_INITIALIZER(&uma_boot_pages);
128
129/* Count of free boottime pages */
130static int uma_boot_free = 0;
131
132/* Is the VM done starting up? */
133static int booted = 0;
134
135/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
136static u_int uma_max_ipers;
137static u_int uma_max_ipers_ref;
138
139/*
140 * This is the handle used to schedule events that need to happen
141 * outside of the allocation fast path.
142 */
143static struct callout uma_callout;
144#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
145
146/*
147 * This structure is passed as the zone ctor arg so that I don't have to create
148 * a special allocation function just for zones.
149 */
150struct uma_zctor_args {
151 char *name;
152 size_t size;
153 uma_ctor ctor;
154 uma_dtor dtor;
155 uma_init uminit;
156 uma_fini fini;
157 uma_keg_t keg;
158 int align;
159 u_int16_t flags;
160};
161
162struct uma_kctor_args {
163 uma_zone_t zone;
164 size_t size;
165 uma_init uminit;
166 uma_fini fini;
167 int align;
168 u_int16_t flags;
169};
170
171struct uma_bucket_zone {
172 uma_zone_t ubz_zone;
173 char *ubz_name;
174 int ubz_entries;
175};
176
177#define BUCKET_MAX 128
178
179struct uma_bucket_zone bucket_zones[] = {
180 { NULL, "16 Bucket", 16 },
181 { NULL, "32 Bucket", 32 },
182 { NULL, "64 Bucket", 64 },
183 { NULL, "128 Bucket", 128 },
184 { NULL, NULL, 0}
185};
186
187#define BUCKET_SHIFT 4
188#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
189
190/*
191 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
192 * of approximately the right size.
193 */
194static uint8_t bucket_size[BUCKET_ZONES];
195
196enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
197
198/* Prototypes.. */
199
200static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
201static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
202static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
203static void page_free(void *, int, u_int8_t);
204static uma_slab_t slab_zalloc(uma_zone_t, int);
205static void cache_drain(uma_zone_t);
206static void bucket_drain(uma_zone_t, uma_bucket_t);
207static void bucket_cache_drain(uma_zone_t zone);
208static int keg_ctor(void *, int, void *, int);
209static void keg_dtor(void *, int, void *);
210static int zone_ctor(void *, int, void *, int);
211static void zone_dtor(void *, int, void *);
212static int zero_init(void *, int, int);
213static void zone_small_init(uma_zone_t zone);
214static void zone_large_init(uma_zone_t zone);
215static void zone_foreach(void (*zfunc)(uma_zone_t));
216static void zone_timeout(uma_zone_t zone);
217static int hash_alloc(struct uma_hash *);
218static int hash_expand(struct uma_hash *, struct uma_hash *);
219static void hash_free(struct uma_hash *hash);
220static void uma_timeout(void *);
221static void uma_startup3(void);
222static void *uma_zalloc_internal(uma_zone_t, void *, int);
223static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip);
224static void bucket_enable(void);
225static void bucket_init(void);
226static uma_bucket_t bucket_alloc(int, int);
227static void bucket_free(uma_bucket_t);
228static void bucket_zone_drain(void);
229static int uma_zalloc_bucket(uma_zone_t zone, int flags);
230static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
231static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
232static void zone_drain(uma_zone_t);
233static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
234 uma_fini fini, int align, u_int16_t flags);
235
236void uma_print_zone(uma_zone_t);
237void uma_print_stats(void);
238static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
239
240#ifdef WITNESS
241static int nosleepwithlocks = 1;
242SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
243 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
244#else
245static int nosleepwithlocks = 0;
246SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
247 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
248#endif
249SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
250 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
251SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
252
253/*
254 * This routine checks to see whether or not it's safe to enable buckets.
255 */
256
257static void
258bucket_enable(void)
259{
260 if (cnt.v_free_count < cnt.v_free_min)
261 bucketdisable = 1;
262 else
263 bucketdisable = 0;
264}
265
266/*
267 * Initialize bucket_zones, the array of zones of buckets of various sizes.
268 *
269 * For each zone, calculate the memory required for each bucket, consisting
270 * of the header and an array of pointers.  Initialize bucket_size[] so
271 * that each request size in the range it covers maps to this zone.
272 */
273static void
274bucket_init(void)
275{
276 struct uma_bucket_zone *ubz;
277 int i;
278 int j;
279
280 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
281 int size;
282
283 ubz = &bucket_zones[j];
284 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
285 size += sizeof(void *) * ubz->ubz_entries;
286 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
287 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
288 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
289 bucket_size[i >> BUCKET_SHIFT] = j;
290 }
291}
292
293/*
294 * Given a desired number of entries for a bucket, return the zone from which
295 * to allocate the bucket.
296 */
297static struct uma_bucket_zone *
298bucket_zone_lookup(int entries)
299{
300 int idx;
301
302 idx = howmany(entries, 1 << BUCKET_SHIFT);
303 return (&bucket_zones[bucket_size[idx]]);
304}
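
/*
 * Worked example (illustrative): with BUCKET_SHIFT == 4, a request for
 * 20 entries gives idx = howmany(20, 16) = 2; bucket_init() stored the
 * index of the "32 Bucket" zone in bucket_size[2], so the caller gets
 * the smallest bucket zone holding at least 20 pointers.
 */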
305
306static uma_bucket_t
307bucket_alloc(int entries, int bflags)
308{
309 struct uma_bucket_zone *ubz;
310 uma_bucket_t bucket;
311
312 /*
313 * This is to stop us from allocating per cpu buckets while we're
314 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the
315 * boot pages. This also prevents us from allocating buckets in
316 * low memory situations.
317 */
318 if (bucketdisable)
319 return (NULL);
320
321 ubz = bucket_zone_lookup(entries);
322 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
323 if (bucket) {
324#ifdef INVARIANTS
325 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
326#endif
327 bucket->ub_cnt = 0;
328 bucket->ub_entries = ubz->ubz_entries;
329 }
330
331 return (bucket);
332}
333
334static void
335bucket_free(uma_bucket_t bucket)
336{
337 struct uma_bucket_zone *ubz;
338
339 ubz = bucket_zone_lookup(bucket->ub_entries);
340 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE);
341}
342
343static void
344bucket_zone_drain(void)
345{
346 struct uma_bucket_zone *ubz;
347
348 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
349 zone_drain(ubz->ubz_zone);
350}
351
352
353/*
354 * Routine called by the timeout mechanism to fire off time-interval
355 * based calculations (stats, hash size, etc.).
356 *
357 * Arguments:
358 * arg Unused
359 *
360 * Returns:
361 * Nothing
362 */
363static void
364uma_timeout(void *unused)
365{
366 bucket_enable();
367 zone_foreach(zone_timeout);
368
369 /* Reschedule this event */
370 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
371}
372
373/*
374 * Routine to perform timeout driven calculations. This expands the
375 * hashes and does per cpu statistics aggregation.
376 *
377 * Arguments:
378 * zone The zone to operate on
379 *
380 * Returns:
381 * Nothing
382 */
383static void
384zone_timeout(uma_zone_t zone)
385{
386 uma_keg_t keg;
387 uma_cache_t cache;
388 u_int64_t alloc;
389 int cpu;
390
391 keg = zone->uz_keg;
392 alloc = 0;
393
394 /*
395 * Aggregate per cpu cache statistics back to the zone.
396 *
397 * XXX This should be done in the sysctl handler.
398 *
399 * I may rewrite this to set a flag in the per cpu cache instead of
400 * locking. If the flag is not cleared on the next round I will have
401 * to lock and do it here instead so that the statistics don't get too
402 * far out of sync.
403 */
404 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
405 for (cpu = 0; cpu <= mp_maxid; cpu++) {
406 if (CPU_ABSENT(cpu))
407 continue;
408 CPU_LOCK(cpu);
409 cache = &zone->uz_cpu[cpu];
410 /* Add them up, and reset */
411 alloc += cache->uc_allocs;
412 cache->uc_allocs = 0;
413 CPU_UNLOCK(cpu);
414 }
415 }
416
417 /* Now push these stats back into the zone.. */
418 ZONE_LOCK(zone);
419 zone->uz_allocs += alloc;
420
421 /*
422 * Expand the zone hash table.
423 *
424 * This is done if the number of slabs is larger than the hash size.
425 * What I'm trying to do here is completely reduce collisions. This
426 * may be a little aggressive. Should I allow for two collisions max?
427 */
428
429 if (keg->uk_flags & UMA_ZONE_HASH &&
430 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
431 struct uma_hash newhash;
432 struct uma_hash oldhash;
433 int ret;
434
435 /*
436 * This is so involved because allocating and freeing
437 * while the zone lock is held will lead to deadlock.
438 * I have to do everything in stages and check for
439 * races.
440 */
441 newhash = keg->uk_hash;
442 ZONE_UNLOCK(zone);
443 ret = hash_alloc(&newhash);
444 ZONE_LOCK(zone);
445 if (ret) {
446 if (hash_expand(&keg->uk_hash, &newhash)) {
447 oldhash = keg->uk_hash;
448 keg->uk_hash = newhash;
449 } else
450 oldhash = newhash;
451
452 ZONE_UNLOCK(zone);
453 hash_free(&oldhash);
454 ZONE_LOCK(zone);
455 }
456 }
457 ZONE_UNLOCK(zone);
458}
459
460/*
461 * Allocate and zero fill the next sized hash table from the appropriate
462 * backing store.
463 *
464 * Arguments:
465 * hash A new hash structure with the old hash size in uh_hashsize
466 *
467 * Returns:
468 *	1 on success and 0 on failure.
469 */
470static int
471hash_alloc(struct uma_hash *hash)
472{
473 int oldsize;
474 int alloc;
475
476 oldsize = hash->uh_hashsize;
477
478 /* We're just going to go to a power of two greater */
479 if (oldsize) {
480 hash->uh_hashsize = oldsize * 2;
481 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
482 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
483 M_UMAHASH, M_NOWAIT);
484 } else {
485 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
486 hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
487 M_WAITOK);
488 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
489 }
490 if (hash->uh_slab_hash) {
491 bzero(hash->uh_slab_hash, alloc);
492 hash->uh_hashmask = hash->uh_hashsize - 1;
493 return (1);
494 }
495
496 return (0);
497}
498
499/*
500 * Expands the hash table for HASH zones. This is done from zone_timeout
501 * to reduce collisions. This must not be done in the regular allocation
502 * path; otherwise, we can recurse on the vm while allocating pages.
503 *
504 * Arguments:
505 * oldhash The hash you want to expand
506 * newhash The hash structure for the new table
507 *
508 * Returns:
509 *	1 on success and 0 on failure.
510 */
513static int
514hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
515{
516 uma_slab_t slab;
517 int hval;
518 int i;
519
520 if (!newhash->uh_slab_hash)
521 return (0);
522
523 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
524 return (0);
525
526 /*
527 * I need to investigate hash algorithms for resizing without a
528 * full rehash.
529 */
530
531 for (i = 0; i < oldhash->uh_hashsize; i++)
532 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
533 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
534 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
535 hval = UMA_HASH(newhash, slab->us_data);
536 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
537 slab, us_hlink);
538 }
539
540 return (1);
541}
542
543/*
544 * Free the hash bucket to the appropriate backing store.
545 *
546 * Arguments:
547 *	hash  The hash structure whose slab hash table we're freeing
549 *
550 * Returns:
551 * Nothing
552 */
553static void
554hash_free(struct uma_hash *hash)
555{
556 if (hash->uh_slab_hash == NULL)
557 return;
558 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
559 uma_zfree_internal(hashzone,
560 hash->uh_slab_hash, NULL, SKIP_NONE);
561 else
562 free(hash->uh_slab_hash, M_UMAHASH);
563}
564
565/*
566 * Frees all outstanding items in a bucket
567 *
568 * Arguments:
569 * zone The zone to free to, must be unlocked.
570 * bucket The free/alloc bucket with items, cpu queue must be locked.
571 *
572 * Returns:
573 * Nothing
574 */
575
576static void
577bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
578{
579 uma_slab_t slab;
580 int mzone;
581 void *item;
582
583 if (bucket == NULL)
584 return;
585
586 slab = NULL;
587 mzone = 0;
588
589 /* We have to lookup the slab again for malloc.. */
590 if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
591 mzone = 1;
592
593 while (bucket->ub_cnt > 0) {
594 bucket->ub_cnt--;
595 item = bucket->ub_bucket[bucket->ub_cnt];
596#ifdef INVARIANTS
597 bucket->ub_bucket[bucket->ub_cnt] = NULL;
598 KASSERT(item != NULL,
599 ("bucket_drain: botched ptr, item is NULL"));
600#endif
601 /*
602 * This is extremely inefficient. The slab pointer was passed
603 * to uma_zfree_arg, but we lost it because the buckets don't
604 * hold them. This will go away when free() gets a size passed
605 * to it.
606 */
607 if (mzone)
608 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
609 uma_zfree_internal(zone, item, slab, SKIP_DTOR);
610 }
611}
612
613/*
614 * Drains the per cpu caches for a zone.
615 *
616 * Arguments:
617 * zone The zone to drain, must be unlocked.
618 *
619 * Returns:
620 * Nothing
621 */
622static void
623cache_drain(uma_zone_t zone)
624{
625 uma_cache_t cache;
626 int cpu;
627
628 /*
629 * We have to lock each cpu cache before locking the zone
630 */
631 for (cpu = 0; cpu <= mp_maxid; cpu++) {
632 if (CPU_ABSENT(cpu))
633 continue;
634 CPU_LOCK(cpu);
635 cache = &zone->uz_cpu[cpu];
636 bucket_drain(zone, cache->uc_allocbucket);
637 bucket_drain(zone, cache->uc_freebucket);
638 if (cache->uc_allocbucket != NULL)
639 bucket_free(cache->uc_allocbucket);
640 if (cache->uc_freebucket != NULL)
641 bucket_free(cache->uc_freebucket);
642 cache->uc_allocbucket = cache->uc_freebucket = NULL;
643 }
644 ZONE_LOCK(zone);
645 bucket_cache_drain(zone);
646 ZONE_UNLOCK(zone);
647 for (cpu = 0; cpu <= mp_maxid; cpu++) {
648 if (CPU_ABSENT(cpu))
649 continue;
650 CPU_UNLOCK(cpu);
651 }
652}
653
654/*
655 * Drain the cached buckets from a zone. Expects a locked zone on entry.
656 */
657static void
658bucket_cache_drain(uma_zone_t zone)
659{
660 uma_bucket_t bucket;
661
662 /*
663 * Drain the bucket queues and free the buckets, we just keep two per
664 * cpu (alloc/free).
665 */
666 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
667 LIST_REMOVE(bucket, ub_link);
668 ZONE_UNLOCK(zone);
669 bucket_drain(zone, bucket);
670 bucket_free(bucket);
671 ZONE_LOCK(zone);
672 }
673
674 /* Now we do the free queue.. */
675 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
676 LIST_REMOVE(bucket, ub_link);
677 bucket_free(bucket);
678 }
679}
680
681/*
682 * Frees pages from a zone back to the system. This is done on demand from
683 * the pageout daemon.
684 *
685 * Arguments:
686 * zone The zone to free pages from
688 *
689 * Returns:
690 * Nothing.
691 */
692static void
693zone_drain(uma_zone_t zone)
694{
695 struct slabhead freeslabs = {};
696 uma_keg_t keg;
697 uma_slab_t slab;
698 uma_slab_t n;
699 u_int8_t flags;
700 u_int8_t *mem;
701 int i;
702
703 keg = zone->uz_keg;
704
705 /*
706 * We don't want to take pages from statically allocated zones at this
707 * time
708 */
709 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
710 return;
711
712 ZONE_LOCK(zone);
713
714#ifdef UMA_DEBUG
715 printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
716#endif
717 bucket_cache_drain(zone);
718 if (keg->uk_free == 0)
719 goto finished;
720
721 slab = LIST_FIRST(&keg->uk_free_slab);
722 while (slab) {
723 n = LIST_NEXT(slab, us_link);
724
725		/* We have nowhere to free these to */
726 if (slab->us_flags & UMA_SLAB_BOOT) {
727 slab = n;
728 continue;
729 }
730
731 LIST_REMOVE(slab, us_link);
732 keg->uk_pages -= keg->uk_ppera;
733 keg->uk_free -= keg->uk_ipers;
734
735 if (keg->uk_flags & UMA_ZONE_HASH)
736 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
737
738 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
739
740 slab = n;
741 }
742finished:
743 ZONE_UNLOCK(zone);
744
745 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
746 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
747 if (keg->uk_fini)
748 for (i = 0; i < keg->uk_ipers; i++)
749 keg->uk_fini(
750 slab->us_data + (keg->uk_rsize * i),
751 keg->uk_size);
752 flags = slab->us_flags;
753 mem = slab->us_data;
754
755 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
756 (keg->uk_flags & UMA_ZONE_REFCNT)) {
757 vm_object_t obj;
758
759 if (flags & UMA_SLAB_KMEM)
760 obj = kmem_object;
761 else
762 obj = NULL;
763 for (i = 0; i < keg->uk_ppera; i++)
764 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
765 obj);
766 }
767 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
768 uma_zfree_internal(keg->uk_slabzone, slab, NULL,
769 SKIP_NONE);
770#ifdef UMA_DEBUG
771 printf("%s: Returning %d bytes.\n",
772 zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
773#endif
774 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
775 }
776}
777
778/*
779 * Allocate a new slab for a zone. This does not insert the slab onto a list.
780 *
781 * Arguments:
782 * zone The zone to allocate slabs for
783 * wait Shall we wait?
784 *
785 * Returns:
786 * The slab that was allocated or NULL if there is no memory and the
787 * caller specified M_NOWAIT.
788 */
789static uma_slab_t
790slab_zalloc(uma_zone_t zone, int wait)
791{
792 uma_slabrefcnt_t slabref;
793 uma_slab_t slab;
794 uma_keg_t keg;
795 u_int8_t *mem;
796 u_int8_t flags;
797 int i;
798
799 slab = NULL;
800 keg = zone->uz_keg;
801
802#ifdef UMA_DEBUG
803 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
804#endif
805 ZONE_UNLOCK(zone);
806
807 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
808 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
809 if (slab == NULL) {
810 ZONE_LOCK(zone);
811			return (NULL);
812 }
813 }
814
815 /*
816 * This reproduces the old vm_zone behavior of zero filling pages the
817 * first time they are added to a zone.
818 *
819 * Malloced items are zeroed in uma_zalloc.
820 */
821
822 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
823 wait |= M_ZERO;
824 else
825 wait &= ~M_ZERO;
826
827 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
828 &flags, wait);
829 if (mem == NULL) {
830 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
831			uma_zfree_internal(keg->uk_slabzone, slab, NULL, SKIP_NONE);
832 ZONE_LOCK(zone);
833 return (NULL);
834 }
835
836 /* Point the slab into the allocated memory */
837 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
838 slab = (uma_slab_t )(mem + keg->uk_pgoff);
839
840 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
841 (keg->uk_flags & UMA_ZONE_REFCNT))
842 for (i = 0; i < keg->uk_ppera; i++)
843 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
844
845 slab->us_keg = keg;
846 slab->us_data = mem;
847 slab->us_freecount = keg->uk_ipers;
848 slab->us_firstfree = 0;
849 slab->us_flags = flags;
850
851 if (keg->uk_flags & UMA_ZONE_REFCNT) {
852 slabref = (uma_slabrefcnt_t)slab;
853 for (i = 0; i < keg->uk_ipers; i++) {
854 slabref->us_freelist[i].us_refcnt = 0;
855 slabref->us_freelist[i].us_item = i+1;
856 }
857 } else {
858 for (i = 0; i < keg->uk_ipers; i++)
859 slab->us_freelist[i].us_item = i+1;
860 }
861
862 if (keg->uk_init != NULL) {
863 for (i = 0; i < keg->uk_ipers; i++)
864 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
865 keg->uk_size, wait) != 0)
866 break;
867 if (i != keg->uk_ipers) {
868 if (keg->uk_fini != NULL) {
869 for (i--; i > -1; i--)
870 keg->uk_fini(slab->us_data +
871 (keg->uk_rsize * i),
872 keg->uk_size);
873 }
874 if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
875 (keg->uk_flags & UMA_ZONE_REFCNT))
876 for (i = 0; i < keg->uk_ppera; i++)
877 vsetobj((vm_offset_t)mem +
878 (i * PAGE_SIZE), NULL);
879 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
880 uma_zfree_internal(keg->uk_slabzone, slab,
881 NULL, SKIP_NONE);
882 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
883 flags);
884 ZONE_LOCK(zone);
885 return (NULL);
886 }
887 }
888 ZONE_LOCK(zone);
889
890 if (keg->uk_flags & UMA_ZONE_HASH)
891 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
892
893 keg->uk_pages += keg->uk_ppera;
894 keg->uk_free += keg->uk_ipers;
895
896 return (slab);
897}
898
899/*
900 * This function is intended to be used early on in place of page_alloc() so
901 * that we may use the boot time page cache to satisfy allocations before
902 * the VM is ready.
903 */
904static void *
905startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
906{
907 uma_keg_t keg;
908
909 keg = zone->uz_keg;
910
911 /*
912 * Check our small startup cache to see if it has pages remaining.
913 */
914 mtx_lock(&uma_mtx);
915 if (uma_boot_free != 0) {
916 uma_slab_t tmps;
917
918 tmps = LIST_FIRST(&uma_boot_pages);
919 LIST_REMOVE(tmps, us_link);
920 uma_boot_free--;
921 mtx_unlock(&uma_mtx);
922 *pflag = tmps->us_flags;
923 return (tmps->us_data);
924 }
925 mtx_unlock(&uma_mtx);
926 if (booted == 0)
927 panic("UMA: Increase UMA_BOOT_PAGES");
928 /*
929	 * Now that we've booted, reset these users to their real allocator.
930 */
931#ifdef UMA_MD_SMALL_ALLOC
932 keg->uk_allocf = uma_small_alloc;
933#else
934 keg->uk_allocf = page_alloc;
935#endif
936	return (keg->uk_allocf(zone, bytes, pflag, wait));
937}
938
939/*
940 * Allocates a number of pages from the system
941 *
942 * Arguments:
943 * zone Unused
944 * bytes The number of bytes requested
945 * wait Shall we wait?
946 *
947 * Returns:
948 *	A pointer to the allocated memory or possibly
949 * NULL if M_NOWAIT is set.
950 */
951static void *
952page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
953{
954 void *p; /* Returned page */
955
956 *pflag = UMA_SLAB_KMEM;
957 p = (void *) kmem_malloc(kmem_map, bytes, wait);
958
959 return (p);
960}
961
962/*
963 * Allocates a number of pages from within an object
964 *
965 * Arguments:
966 * zone Unused
967 * bytes The number of bytes requested
968 * wait Shall we wait?
969 *
970 * Returns:
971 *	A pointer to the allocated memory or possibly
972 * NULL if M_NOWAIT is set.
973 */
974static void *
975obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
976{
977 vm_object_t object;
978 vm_offset_t retkva, zkva;
979 vm_page_t p;
980 int pages, startpages;
981
982 object = zone->uz_keg->uk_obj;
983 retkva = 0;
984
985 /*
986 * This looks a little weird since we're getting one page at a time.
987 */
988 VM_OBJECT_LOCK(object);
989 p = TAILQ_LAST(&object->memq, pglist);
990 pages = p != NULL ? p->pindex + 1 : 0;
991 startpages = pages;
992 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
993 for (; bytes > 0; bytes -= PAGE_SIZE) {
994 p = vm_page_alloc(object, pages,
995 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
996 if (p == NULL) {
997 if (pages != startpages)
998 pmap_qremove(retkva, pages - startpages);
999 while (pages != startpages) {
1000 pages--;
1001 p = TAILQ_LAST(&object->memq, pglist);
1002 vm_page_lock_queues();
1003 vm_page_unwire(p, 0);
1004 vm_page_free(p);
1005 vm_page_unlock_queues();
1006 }
1007 retkva = 0;
1008 goto done;
1009 }
1010 pmap_qenter(zkva, &p, 1);
1011 if (retkva == 0)
1012 retkva = zkva;
1013 zkva += PAGE_SIZE;
1014 pages += 1;
1015 }
1016done:
1017 VM_OBJECT_UNLOCK(object);
1018 *flags = UMA_SLAB_PRIV;
1019
1020 return ((void *)retkva);
1021}
1022
1023/*
1024 * Frees a number of pages to the system
1025 *
1026 * Arguments:
1027 * mem A pointer to the memory to be freed
1028 * size The size of the memory being freed
1029 * flags The original p->us_flags field
1030 *
1031 * Returns:
1032 * Nothing
1033 */
1034static void
1035page_free(void *mem, int size, u_int8_t flags)
1036{
1037 vm_map_t map;
1038
1039 if (flags & UMA_SLAB_KMEM)
1040 map = kmem_map;
1041 else
1042 panic("UMA: page_free used with invalid flags %d\n", flags);
1043
1044 kmem_free(map, (vm_offset_t)mem, size);
1045}
1046
1047/*
1048 * Zero fill initializer
1049 *
1050 * Arguments/Returns follow uma_init specifications
1051 */
1052static int
1053zero_init(void *mem, int size, int flags)
1054{
1055 bzero(mem, size);
1056 return (0);
1057}
1058
1059/*
1060 * Finish creating a small uma zone.  This calculates ipers and the zone size.
1061 *
1062 * Arguments
1063 * zone The zone we should initialize
1064 *
1065 * Returns
1066 * Nothing
1067 */
1068static void
1069zone_small_init(uma_zone_t zone)
1070{
1071 uma_keg_t keg;
1072 u_int rsize;
1073 u_int memused;
1074 u_int wastedspace;
1075 u_int shsize;
1076
1077 keg = zone->uz_keg;
1078 KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
1079 rsize = keg->uk_size;
1080
1081 if (rsize < UMA_SMALLEST_UNIT)
1082 rsize = UMA_SMALLEST_UNIT;
1083 if (rsize & keg->uk_align)
1084 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1085
1086 keg->uk_rsize = rsize;
1087 keg->uk_ppera = 1;
1088
1089 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1090 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
1091 shsize = sizeof(struct uma_slab_refcnt);
1092 } else {
1093 rsize += UMA_FRITM_SZ; /* Account for linkage */
1094 shsize = sizeof(struct uma_slab);
1095 }
1096
1097 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1098 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
1099 memused = keg->uk_ipers * rsize + shsize;
1100 wastedspace = UMA_SLAB_SIZE - memused;
1101
1102 /*
1103 * We can't do OFFPAGE if we're internal or if we've been
1104	 * asked not to go to the VM for buckets.  If we do OFFPAGE here we
1105	 * may end up going to the VM (kmem_map) for slab headers, which we
1106	 * must not do when we're UMA_ZFLAG_CACHEONLY as a result of
1107	 * UMA_ZONE_VM, since that flag clearly forbids it.
1108 */
1109 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1110 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1111 return;
1112
1113 if ((wastedspace >= UMA_MAX_WASTE) &&
1114 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1115 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1116 KASSERT(keg->uk_ipers <= 255,
1117 ("zone_small_init: keg->uk_ipers too high!"));
1118#ifdef UMA_DEBUG
1119 printf("UMA decided we need offpage slab headers for "
1120 "zone: %s, calculated wastedspace = %d, "
1121 "maximum wasted space allowed = %d, "
1122 "calculated ipers = %d, "
1123 "new wasted space = %d\n", zone->uz_name, wastedspace,
1124 UMA_MAX_WASTE, keg->uk_ipers,
1125 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1126#endif
1127 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1128 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1129 keg->uk_flags |= UMA_ZONE_HASH;
1130 }
1131}
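
/*
 * Worked example (illustrative; assumes UMA_SLAB_SIZE == 4096): a
 * 200-byte item with pointer alignment keeps rsize == 200 (already
 * aligned); a non-REFCNT keg then computes
 * uk_ipers = (4096 - sizeof(struct uma_slab)) / (200 + UMA_FRITM_SZ),
 * and only if the leftover wastedspace reaches UMA_MAX_WASTE is the
 * header pushed OFFPAGE to recover that space.
 */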
1132
1133/*
1134 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do
1135 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1136 * more complicated.
1137 *
1138 * Arguments
1139 * zone The zone we should initialize
1140 *
1141 * Returns
1142 * Nothing
1143 */
1144static void
1145zone_large_init(uma_zone_t zone)
1146{
1147 uma_keg_t keg;
1148 int pages;
1149
1150 keg = zone->uz_keg;
1151
1152 KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
1153 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1154 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1155
1156 pages = keg->uk_size / UMA_SLAB_SIZE;
1157
1158 /* Account for remainder */
1159 if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1160 pages++;
1161
1162 keg->uk_ppera = pages;
1163 keg->uk_ipers = 1;
1164
1165 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1166 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1167 keg->uk_flags |= UMA_ZONE_HASH;
1168
1169 keg->uk_rsize = keg->uk_size;
1170}
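
/*
 * Worked example (illustrative; assumes UMA_SLAB_SIZE == 4096): a
 * 9000-byte item computes pages = 9000 / 4096 = 2, rounds up to 3 for
 * the remainder, and ends up as one item per three-page slab whose
 * header always lives OFFPAGE.
 */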
1171
1172/*
1173 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1174 * the keg onto the global keg list.
1175 *
1176 * Arguments/Returns follow uma_ctor specifications
1177 * udata Actually uma_kctor_args
1178 */
1179static int
1180keg_ctor(void *mem, int size, void *udata, int flags)
1181{
1182 struct uma_kctor_args *arg = udata;
1183 uma_keg_t keg = mem;
1184 uma_zone_t zone;
1185
1186 bzero(keg, size);
1187 keg->uk_size = arg->size;
1188 keg->uk_init = arg->uminit;
1189 keg->uk_fini = arg->fini;
1190 keg->uk_align = arg->align;
1191 keg->uk_free = 0;
1192 keg->uk_pages = 0;
1193 keg->uk_flags = arg->flags;
1194 keg->uk_allocf = page_alloc;
1195 keg->uk_freef = page_free;
1196 keg->uk_recurse = 0;
1197 keg->uk_slabzone = NULL;
1198
1199 /*
1200 * The master zone is passed to us at keg-creation time.
1201 */
1202 zone = arg->zone;
1203 zone->uz_keg = keg;
1204
1205 if (arg->flags & UMA_ZONE_VM)
1206 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1207
1208 if (arg->flags & UMA_ZONE_ZINIT)
1209 keg->uk_init = zero_init;
1210
1211 /*
1212 * The +UMA_FRITM_SZ added to uk_size is to account for the
1213 * linkage that is added to the size in zone_small_init(). If
1214 * we don't account for this here then we may end up in
1215 * zone_small_init() with a calculated 'ipers' of 0.
1216 */
1217 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1218 if ((keg->uk_size+UMA_FRITMREF_SZ) >
1219 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1220 zone_large_init(zone);
1221 else
1222 zone_small_init(zone);
1223 } else {
1224 if ((keg->uk_size+UMA_FRITM_SZ) >
1225 (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1226 zone_large_init(zone);
1227 else
1228 zone_small_init(zone);
1229 }
1230
1231 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1232 if (keg->uk_flags & UMA_ZONE_REFCNT)
1233 keg->uk_slabzone = slabrefzone;
1234 else
1235 keg->uk_slabzone = slabzone;
1236 }
1237
1238 /*
1239 * If we haven't booted yet we need allocations to go through the
1240 * startup cache until the vm is ready.
1241 */
1242 if (keg->uk_ppera == 1) {
1243#ifdef UMA_MD_SMALL_ALLOC
1244 keg->uk_allocf = uma_small_alloc;
1245 keg->uk_freef = uma_small_free;
1246#endif
1247 if (booted == 0)
1248 keg->uk_allocf = startup_alloc;
1249 }
1250
1251 /*
1252 * Initialize keg's lock (shared among zones) through
1253 * Master zone
1254 */
1255 zone->uz_lock = &keg->uk_lock;
1256 if (arg->flags & UMA_ZONE_MTXCLASS)
1257 ZONE_LOCK_INIT(zone, 1);
1258 else
1259 ZONE_LOCK_INIT(zone, 0);
1260
1261 /*
1262 * If we're putting the slab header in the actual page we need to
1263 * figure out where in each page it goes. This calculates a right
1264 * justified offset into the memory on an ALIGN_PTR boundary.
1265 */
1266 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1267 u_int totsize;
1268
1269 /* Size of the slab struct and free list */
1270 if (keg->uk_flags & UMA_ZONE_REFCNT)
1271 totsize = sizeof(struct uma_slab_refcnt) +
1272 keg->uk_ipers * UMA_FRITMREF_SZ;
1273 else
1274 totsize = sizeof(struct uma_slab) +
1275 keg->uk_ipers * UMA_FRITM_SZ;
1276
1277 if (totsize & UMA_ALIGN_PTR)
1278 totsize = (totsize & ~UMA_ALIGN_PTR) +
1279 (UMA_ALIGN_PTR + 1);
1280 keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
1281
1282 if (keg->uk_flags & UMA_ZONE_REFCNT)
1283 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1284 + keg->uk_ipers * UMA_FRITMREF_SZ;
1285 else
1286 totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1287 + keg->uk_ipers * UMA_FRITM_SZ;
1288
1289 /*
1290		 * The only way the following is possible is if our
1291		 * UMA_ALIGN_PTR adjustments have made us bigger than
1292 * UMA_SLAB_SIZE. I haven't checked whether this is
1293 * mathematically possible for all cases, so we make
1294 * sure here anyway.
1295 */
1296 if (totsize > UMA_SLAB_SIZE) {
1297 printf("zone %s ipers %d rsize %d size %d\n",
1298 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1299 keg->uk_size);
1300 panic("UMA slab won't fit.\n");
1301 }
1302 }
1303
1304 if (keg->uk_flags & UMA_ZONE_HASH)
1305 hash_alloc(&keg->uk_hash);
1306
1307#ifdef UMA_DEBUG
1308 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1309 zone->uz_name, zone,
1310 keg->uk_size, keg->uk_ipers,
1311 keg->uk_ppera, keg->uk_pgoff);
1312#endif
1313
1314 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1315
1316 mtx_lock(&uma_mtx);
1317 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1318 mtx_unlock(&uma_mtx);
1319 return (0);
1320}
1321
1322/*
1323 * Zone header ctor. This initializes all fields, locks, etc.
1324 *
1325 * Arguments/Returns follow uma_ctor specifications
1326 * udata Actually uma_zctor_args
1327 */
1328
1329static int
1330zone_ctor(void *mem, int size, void *udata, int flags)
1331{
1332 struct uma_zctor_args *arg = udata;
1333 uma_zone_t zone = mem;
1334 uma_zone_t z;
1335 uma_keg_t keg;
1336
1337 bzero(zone, size);
1338 zone->uz_name = arg->name;
1339 zone->uz_ctor = arg->ctor;
1340 zone->uz_dtor = arg->dtor;
1341 zone->uz_init = NULL;
1342 zone->uz_fini = NULL;
1343 zone->uz_allocs = 0;
1344 zone->uz_fills = zone->uz_count = 0;
1345
1346 if (arg->flags & UMA_ZONE_SECONDARY) {
1347 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1348 keg = arg->keg;
1349 zone->uz_keg = keg;
1350 zone->uz_init = arg->uminit;
1351 zone->uz_fini = arg->fini;
1352 zone->uz_lock = &keg->uk_lock;
1353 mtx_lock(&uma_mtx);
1354 ZONE_LOCK(zone);
1355 keg->uk_flags |= UMA_ZONE_SECONDARY;
1356 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1357 if (LIST_NEXT(z, uz_link) == NULL) {
1358 LIST_INSERT_AFTER(z, zone, uz_link);
1359 break;
1360 }
1361 }
1362 ZONE_UNLOCK(zone);
1363 mtx_unlock(&uma_mtx);
1364 } else if (arg->keg == NULL) {
1365 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1366 arg->align, arg->flags) == NULL)
1367 return (ENOMEM);
1368 } else {
1369 struct uma_kctor_args karg;
1370 int error;
1371
1372 /* We should only be here from uma_startup() */
1373 karg.size = arg->size;
1374 karg.uminit = arg->uminit;
1375 karg.fini = arg->fini;
1376 karg.align = arg->align;
1377 karg.flags = arg->flags;
1378 karg.zone = zone;
1379 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1380 flags);
1381 if (error)
1382 return (error);
1383 }
1384 keg = zone->uz_keg;
1385 zone->uz_lock = &keg->uk_lock;
1386
1387 /*
1388 * Some internal zones don't have room allocated for the per cpu
1389 * caches. If we're internal, bail out here.
1390 */
1391 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1392 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
1393 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1394 return (0);
1395 }
1396
1397 if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1398 zone->uz_count = BUCKET_MAX;
1399 else if (keg->uk_ipers <= BUCKET_MAX)
1400 zone->uz_count = keg->uk_ipers;
1401 else
1402 zone->uz_count = BUCKET_MAX;
1403 return (0);
1404}
1405
1406/*
1407 * Keg header dtor. This frees all data, destroys locks, frees the hash
1408 * table and removes the keg from the global list.
1409 *
1410 * Arguments/Returns follow uma_dtor specifications
1411 * udata unused
1412 */
1413static void
1414keg_dtor(void *arg, int size, void *udata)
1415{
1416 uma_keg_t keg;
1417
1418 keg = (uma_keg_t)arg;
1419 mtx_lock(&keg->uk_lock);
1420 if (keg->uk_free != 0) {
1421 printf("Freed UMA keg was not empty (%d items). "
1422 " Lost %d pages of memory.\n",
1423 keg->uk_free, keg->uk_pages);
1424 }
1425 mtx_unlock(&keg->uk_lock);
1426
1427 if (keg->uk_flags & UMA_ZONE_HASH)
1428 hash_free(&keg->uk_hash);
1429
1430 mtx_destroy(&keg->uk_lock);
1431}
1432
1433/*
1434 * Zone header dtor.
1435 *
1436 * Arguments/Returns follow uma_dtor specifications
1437 * udata unused
1438 */
1439static void
1440zone_dtor(void *arg, int size, void *udata)
1441{
1442 uma_zone_t zone;
1443 uma_keg_t keg;
1444
1445 zone = (uma_zone_t)arg;
1446 keg = zone->uz_keg;
1447
1448 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
1449 cache_drain(zone);
1450
1451 mtx_lock(&uma_mtx);
1452 zone_drain(zone);
1453 if (keg->uk_flags & UMA_ZONE_SECONDARY) {
1454 LIST_REMOVE(zone, uz_link);
1455 /*
1456		 * XXX there is a race here: the zone can be
1457		 * drained, the zone lock released, and the zone
1458		 * refilled before we remove it... we don't care
1459		 * for now.
1460 */
1461 ZONE_LOCK(zone);
1462 if (LIST_EMPTY(&keg->uk_zones))
1463 keg->uk_flags &= ~UMA_ZONE_SECONDARY;
1464 ZONE_UNLOCK(zone);
1465 mtx_unlock(&uma_mtx);
1466 } else {
1467 LIST_REMOVE(keg, uk_link);
1468 LIST_REMOVE(zone, uz_link);
1469 mtx_unlock(&uma_mtx);
1470 uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
1471 }
1472 zone->uz_keg = NULL;
1473}
1474
1475/*
1476 * Traverses every zone in the system and calls a callback
1477 *
1478 * Arguments:
1479 * zfunc A pointer to a function which accepts a zone
1480 * as an argument.
1481 *
1482 * Returns:
1483 * Nothing
1484 */
1485static void
1486zone_foreach(void (*zfunc)(uma_zone_t))
1487{
1488 uma_keg_t keg;
1489 uma_zone_t zone;
1490
1491 mtx_lock(&uma_mtx);
1492 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1493 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1494 zfunc(zone);
1495 }
1496 mtx_unlock(&uma_mtx);
1497}
1498
1499/* Public functions */
1500/* See uma.h */
1501void
1502uma_startup(void *bootmem)
1503{
1504 struct uma_zctor_args args;
1505 uma_slab_t slab;
1506 u_int slabsize;
1507 u_int objsize, totsize, wsize;
1508 int i;
1509
1510#ifdef UMA_DEBUG
1511 printf("Creating uma keg headers zone and keg.\n");
1512#endif
1513 /*
1514 * The general UMA lock is a recursion-allowed lock because
1515 * there is a code path where, while we're still configured
1516 * to use startup_alloc() for backend page allocations, we
1517 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
1518 * which grabs uma_mtx, only to later call into startup_alloc()
1519 * because while freeing we needed to allocate a bucket. Since
1520 * startup_alloc() also takes uma_mtx, we need to be able to
1521 * recurse on it.
1522 */
1523 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
1524
1525 /*
1526 * Figure out the maximum number of items-per-slab we'll have if
1527 * we're using the OFFPAGE slab header to track free items, given
1528 * all possible object sizes and the maximum desired wastage
1529 * (UMA_MAX_WASTE).
1530 *
1531 * We iterate until we find an object size for
1532 * which the calculated wastage in zone_small_init() will be
1533 * enough to warrant OFFPAGE. Since wastedspace versus objsize
1534 * is an overall increasing see-saw function, we find the smallest
1535 * objsize such that the wastage is always acceptable for objects
1536 * with that objsize or smaller. Since a smaller objsize always
1537 * generates a larger possible uma_max_ipers, we use this computed
1538 * objsize to calculate the largest ipers possible. Since the
1539 * ipers calculated for OFFPAGE slab headers is always larger than
1540 * the ipers initially calculated in zone_small_init(), we use
1541 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1542 * obtain the maximum ipers possible for offpage slab headers.
1543 *
1544	 * It should be noted that ipers versus objsize is an inversely
1545 * proportional function which drops off rather quickly so as
1546 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1547 * falls into the portion of the inverse relation AFTER the steep
1548 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1549 *
1550 * Note that we have 8-bits (1 byte) to use as a freelist index
1551 * inside the actual slab header itself and this is enough to
1552	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT-sized
1553 * object with offpage slab header would have ipers =
1554 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1555 * 1 greater than what our byte-integer freelist index can
1556	 * accommodate, but we know that this situation never occurs as
1557 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1558 * that we need to go to offpage slab headers. Or, if we do,
1559 * then we trap that condition below and panic in the INVARIANTS case.
1560 */
1561 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1562 totsize = wsize;
1563 objsize = UMA_SMALLEST_UNIT;
1564 while (totsize >= wsize) {
1565 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1566 (objsize + UMA_FRITM_SZ);
1567 totsize *= (UMA_FRITM_SZ + objsize);
1568 objsize++;
1569 }
1570 if (objsize > UMA_SMALLEST_UNIT)
1571 objsize--;
1572 uma_max_ipers = UMA_SLAB_SIZE / objsize;
1573
1574 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1575 totsize = wsize;
1576 objsize = UMA_SMALLEST_UNIT;
1577 while (totsize >= wsize) {
1578 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1579 (objsize + UMA_FRITMREF_SZ);
1580 totsize *= (UMA_FRITMREF_SZ + objsize);
1581 objsize++;
1582 }
1583 if (objsize > UMA_SMALLEST_UNIT)
1584 objsize--;
1585 uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
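
	/*
	 * Restated (an illustrative sketch of the invariant, not additional
	 * logic): with usable = UMA_SLAB_SIZE - sizeof(header), each loop
	 * above ends with objsize equal to the smallest size for which
	 *
	 *	usable - (usable / (objsize + fritm)) * (objsize + fritm)
	 *	    > UMA_MAX_WASTE
	 *
	 * i.e. the smallest size that would be pushed OFFPAGE.  Because
	 * UMA_SLAB_SIZE / objsize only shrinks as objsize grows, this
	 * smallest OFFPAGE-eligible size bounds ipers for every offpage
	 * slab, which is exactly what uma_max_ipers and uma_max_ipers_ref
	 * record.
	 */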
1586
1587 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1588 ("uma_startup: calculated uma_max_ipers values too large!"));
1589
1590#ifdef UMA_DEBUG
1591 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1592	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
1593 uma_max_ipers_ref);
1594#endif
1595
1596 /* "manually" create the initial zone */
1597 args.name = "UMA Kegs";
1598 args.size = sizeof(struct uma_keg);
1599 args.ctor = keg_ctor;
1600 args.dtor = keg_dtor;
1601 args.uminit = zero_init;
1602 args.fini = NULL;
1603 args.keg = &masterkeg;
1604 args.align = 32 - 1;
1605 args.flags = UMA_ZFLAG_INTERNAL;
1606	/* The initial zone has no per-CPU queues so it's smaller */
1607 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1608
1609#ifdef UMA_DEBUG
1610 printf("Filling boot free list.\n");
1611#endif
1612 for (i = 0; i < UMA_BOOT_PAGES; i++) {
1613 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1614 slab->us_data = (u_int8_t *)slab;
1615 slab->us_flags = UMA_SLAB_BOOT;
1616 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1617 uma_boot_free++;
1618 }
1619
1620#ifdef UMA_DEBUG
1621 printf("Creating uma zone headers zone and keg.\n");
1622#endif
1623 args.name = "UMA Zones";
1624 args.size = sizeof(struct uma_zone) +
1625 (sizeof(struct uma_cache) * (mp_maxid + 1));
1626 args.ctor = zone_ctor;
1627 args.dtor = zone_dtor;
1628 args.uminit = zero_init;
1629 args.fini = NULL;
1630 args.keg = NULL;
1631 args.align = 32 - 1;
1632 args.flags = UMA_ZFLAG_INTERNAL;
1633	/* The initial zone has no per-CPU queues so it's smaller */
1634 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1635
1636#ifdef UMA_DEBUG
1637 printf("Initializing pcpu cache locks.\n");
1638#endif
1639 /* Initialize the pcpu cache lock set once and for all */
1640 for (i = 0; i <= mp_maxid; i++)
1641 CPU_LOCK_INIT(i);
1642
1643#ifdef UMA_DEBUG
1644 printf("Creating slab and hash zones.\n");
1645#endif
1646
1647 /*
1648 * This is the max number of free list items we'll have with
1649 * offpage slabs.
1650 */
1651 slabsize = uma_max_ipers * UMA_FRITM_SZ;
1652 slabsize += sizeof(struct uma_slab);
1653
1654 /* Now make a zone for slab headers */
1655 slabzone = uma_zcreate("UMA Slabs",
1656 slabsize,
1657 NULL, NULL, NULL, NULL,
1658 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1659
1660 /*
1661 * We also create a zone for the bigger slabs with reference
1662	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1663 */
1664 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1665 slabsize += sizeof(struct uma_slab_refcnt);
1666 slabrefzone = uma_zcreate("UMA RCntSlabs",
1667 slabsize,
1668 NULL, NULL, NULL, NULL,
1669 UMA_ALIGN_PTR,
1670 UMA_ZFLAG_INTERNAL);
1671
1672 hashzone = uma_zcreate("UMA Hash",
1673 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1674 NULL, NULL, NULL, NULL,
1675 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1676
1677 bucket_init();
1678
1679#ifdef UMA_MD_SMALL_ALLOC
1680 booted = 1;
1681#endif
1682
1683#ifdef UMA_DEBUG
1684 printf("UMA startup complete.\n");
1685#endif
1686}
1687
1688/* see uma.h */
1689void
1690uma_startup2(void)
1691{
1692 booted = 1;
1693 bucket_enable();
1694#ifdef UMA_DEBUG
1695 printf("UMA startup2 complete.\n");
1696#endif
1697}
1698
1699/*
1700 * Initialize our callout handle.
1701 */
1703
1704static void
1705uma_startup3(void)
1706{
1707#ifdef UMA_DEBUG
1708 printf("Starting callout.\n");
1709#endif
1710 callout_init(&uma_callout, CALLOUT_MPSAFE);
1711 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1712#ifdef UMA_DEBUG
1713 printf("UMA startup3 complete.\n");
1714#endif
1715}
1716
1717static uma_zone_t
1718uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1719 int align, u_int16_t flags)
1720{
1721 struct uma_kctor_args args;
1722
1723 args.size = size;
1724 args.uminit = uminit;
1725 args.fini = fini;
1726 args.align = align;
1727 args.flags = flags;
1728 args.zone = zone;
1729 return (uma_zalloc_internal(kegs, &args, M_WAITOK));
1730}
1731
1732/* See uma.h */
1733uma_zone_t
1734uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1735 uma_init uminit, uma_fini fini, int align, u_int16_t flags)
1737{
1738 struct uma_zctor_args args;
1739
1740 /* This stuff is essential for the zone ctor */
1741 args.name = name;
1742 args.size = size;
1743 args.ctor = ctor;
1744 args.dtor = dtor;
1745 args.uminit = uminit;
1746 args.fini = fini;
1747 args.align = align;
1748 args.flags = flags;
1749 args.keg = NULL;
1750
1751 return (uma_zalloc_internal(zones, &args, M_WAITOK));
1752}
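
/*
 * Example of the typical consumer pairing (a minimal sketch; the "foo"
 * names are hypothetical and not part of this file):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */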
1753
1754/* See uma.h */
1755uma_zone_t
1756uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1757 uma_init zinit, uma_fini zfini, uma_zone_t master)
1758{
1759 struct uma_zctor_args args;
1760
1761 args.name = name;
1762 args.size = master->uz_keg->uk_size;
1763 args.ctor = ctor;
1764 args.dtor = dtor;
1765 args.uminit = zinit;
1766 args.fini = zfini;
1767 args.align = master->uz_keg->uk_align;
1768 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
1769 args.keg = master->uz_keg;
1770
1771 return (uma_zalloc_internal(zones, &args, M_WAITOK));
1772}
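
/*
 * Example of zone layering (a sketch; the "thing"/"cook" names are
 * hypothetical): a secondary zone shares the master's keg of slabs but
 * supplies its own ctor/dtor:
 *
 *	thing_zone = uma_zcreate("things", sizeof(struct thing),
 *	    NULL, NULL, thing_init, thing_fini, UMA_ALIGN_PTR, 0);
 *	cooked_zone = uma_zsecond_create("cooked things",
 *	    cook_ctor, cook_dtor, NULL, NULL, thing_zone);
 */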
1773
1774/* See uma.h */
1775void
1776uma_zdestroy(uma_zone_t zone)
1777{
1778 uma_zfree_internal(zones, zone, NULL, SKIP_NONE);
1779}
1780
1781/* See uma.h */
1782void *
1783uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1784{
1785 void *item;
1786 uma_cache_t cache;
1787 uma_bucket_t bucket;
1788 int cpu;
1789 int badness;
1790
1791 /* This is the fast path allocation */
1792#ifdef UMA_DEBUG_ALLOC_1
1793 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1794#endif
1795 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1796 zone->uz_name, flags);
1797
1798 if (!(flags & M_NOWAIT)) {
1799 KASSERT(curthread->td_intr_nesting_level == 0,
1800 ("malloc(M_WAITOK) in interrupt context"));
1801 if (nosleepwithlocks) {
1802#ifdef WITNESS
1803 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1804 NULL,
1805 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
1806 zone->uz_name);
1807#else
1808 badness = 1;
1809#endif
1810 } else {
1811 badness = 0;
1812#ifdef WITNESS
1813 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1814 "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
1815#endif
1816 }
1817 if (badness) {
1818 flags &= ~M_WAITOK;
1819 flags |= M_NOWAIT;
1820 }
1821 }
1822
1823zalloc_restart:
1824 cpu = PCPU_GET(cpuid);
1825 CPU_LOCK(cpu);
1826 cache = &zone->uz_cpu[cpu];
1827
1828zalloc_start:
1829 bucket = cache->uc_allocbucket;
1830
1831 if (bucket) {
1832 if (bucket->ub_cnt > 0) {
1833 bucket->ub_cnt--;
1834 item = bucket->ub_bucket[bucket->ub_cnt];
1835#ifdef INVARIANTS
1836 bucket->ub_bucket[bucket->ub_cnt] = NULL;
1837#endif
1838 KASSERT(item != NULL,
1839 ("uma_zalloc: Bucket pointer mangled."));
1840 cache->uc_allocs++;
1841#ifdef INVARIANTS
1842 ZONE_LOCK(zone);
1843 uma_dbg_alloc(zone, NULL, item);
1844 ZONE_UNLOCK(zone);
1845#endif
1846 CPU_UNLOCK(cpu);
1847 if (zone->uz_ctor != NULL) {
1848 if (zone->uz_ctor(item, zone->uz_keg->uk_size,
1849 udata, flags) != 0) {
1850 uma_zfree_internal(zone, item, udata,
1851 SKIP_DTOR);
1852 return (NULL);
1853 }
1854 }
1855 if (flags & M_ZERO)
1856 bzero(item, zone->uz_keg->uk_size);
1857 return (item);
1858 } else if (cache->uc_freebucket) {
1859 /*
1860 * We have run out of items in our allocbucket.
1861 * See if we can switch with our free bucket.
1862 */
1863 if (cache->uc_freebucket->ub_cnt > 0) {
1864#ifdef UMA_DEBUG_ALLOC
1865 printf("uma_zalloc: Swapping empty with"
1866 " alloc.\n");
1867#endif
1868 bucket = cache->uc_freebucket;
1869 cache->uc_freebucket = cache->uc_allocbucket;
1870 cache->uc_allocbucket = bucket;
1871
1872 goto zalloc_start;
1873 }
1874 }
1875 }
1876 ZONE_LOCK(zone);
1877 /* Since we have locked the zone we may as well send back our stats */
1878 zone->uz_allocs += cache->uc_allocs;
1879 cache->uc_allocs = 0;
1880
1881 /* Our old one is now a free bucket */
1882 if (cache->uc_allocbucket) {
1883 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1884 ("uma_zalloc_arg: Freeing a non free bucket."));
1885 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1886 cache->uc_allocbucket, ub_link);
1887 cache->uc_allocbucket = NULL;
1888 }
1889
1890 /* Check the free list for a new alloc bucket */
1891 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1892 KASSERT(bucket->ub_cnt != 0,
1893 ("uma_zalloc_arg: Returning an empty bucket."));
1894
1895 LIST_REMOVE(bucket, ub_link);
1896 cache->uc_allocbucket = bucket;
1897 ZONE_UNLOCK(zone);
1898 goto zalloc_start;
1899 }
1900	/* We are no longer associated with this cpu. */
1901 CPU_UNLOCK(cpu);
1902
1903	/* Bump up our uz_count so we get here less often */
1904 if (zone->uz_count < BUCKET_MAX)
1905 zone->uz_count++;
1906
1907 /*
1908	 * Now let's just fill a bucket and put it on the free list.  If that
1909	 * works we'll restart the allocation from the beginning.
1910 */
1911 if (uma_zalloc_bucket(zone, flags)) {
1912 ZONE_UNLOCK(zone);
1913 goto zalloc_restart;
1914 }
1915 ZONE_UNLOCK(zone);
1916 /*
1917 * We may not be able to get a bucket so return an actual item.
1918 */
1919#ifdef UMA_DEBUG
1920 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1921#endif
1922
1923 return (uma_zalloc_internal(zone, udata, flags));
1924}
1925
1926static uma_slab_t
1927uma_zone_slab(uma_zone_t zone, int flags)
1928{
1929 uma_slab_t slab;
1930 uma_keg_t keg;
1931
1932 keg = zone->uz_keg;
1933
1934 /*
1935 * This is to prevent us from recursively trying to allocate
1936 * buckets. The problem is that if an allocation forces us to
1937 * grab a new bucket we will call page_alloc, which will go off
1938 * and cause the vm to allocate vm_map_entries. If we need new
1939 * buckets there too we will recurse in kmem_alloc and bad
1940 * things happen. So instead we return a NULL bucket, and make
1941	 * the code that allocates buckets smart enough to deal with it.
1942 */
1943 if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
1944 return (NULL);
1945
1946 slab = NULL;
1947
1948 for (;;) {
1949 /*
1950 * Find a slab with some space. Prefer slabs that are partially
1951 * used over those that are totally full. This helps to reduce
1952 * fragmentation.
1953 */
1954 if (keg->uk_free != 0) {
1955 if (!LIST_EMPTY(&keg->uk_part_slab)) {
1956 slab = LIST_FIRST(&keg->uk_part_slab);
1957 } else {
1958 slab = LIST_FIRST(&keg->uk_free_slab);
1959 LIST_REMOVE(slab, us_link);
1960 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
1961 us_link);
1962 }
1963 return (slab);
1964 }
1965
1966 /*
1967 * M_NOVM means don't ask at all!
1968 */
1969 if (flags & M_NOVM)
1970 break;
1971
1972 if (keg->uk_maxpages &&
1973 keg->uk_pages >= keg->uk_maxpages) {
1974 keg->uk_flags |= UMA_ZFLAG_FULL;
1975
1976 if (flags & M_NOWAIT)
1977 break;
1978 else
1979 msleep(keg, &keg->uk_lock, PVM,
1980 "zonelimit", 0);
1981 continue;
1982 }
1983 keg->uk_recurse++;
1984 slab = slab_zalloc(zone, flags);
1985 keg->uk_recurse--;
1986
1987 /*
1988 * If we got a slab here it's safe to mark it partially used
1989 * and return. We assume that the caller is going to remove
1990 * at least one item.
1991 */
1992 if (slab) {
1993 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
1994 return (slab);
1995 }
1996 /*
1997		 * We might not have been able to get a slab, but another cpu
1998		 * could have done so while we were unlocked, so loop and check
1999		 * the free list again before we fail.
2000 */
2001 if (flags & M_NOWAIT)
2002 flags |= M_NOVM;
2003 }
2004 return (slab);
2005}
2006
2007static void *
2008uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
2009{
2010 uma_keg_t keg;
2011 uma_slabrefcnt_t slabref;
2012 void *item;
2013 u_int8_t freei;
2014
2015 keg = zone->uz_keg;
2016
2017 freei = slab->us_firstfree;
2018 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2019 slabref = (uma_slabrefcnt_t)slab;
2020 slab->us_firstfree = slabref->us_freelist[freei].us_item;
2021 } else {
2022 slab->us_firstfree = slab->us_freelist[freei].us_item;
2023 }
2024 item = slab->us_data + (keg->uk_rsize * freei);
2025
2026 slab->us_freecount--;
2027 keg->uk_free--;
2028#ifdef INVARIANTS
2029 uma_dbg_alloc(zone, slab, item);
2030#endif
2031 /* Move this slab to the full list */
2032 if (slab->us_freecount == 0) {
2033 LIST_REMOVE(slab, us_link);
2034 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2035 }
2036
2037 return (item);
2038}
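
/*
 * The free list consumed above is an index-linked chain threaded through
 * the slab: us_firstfree names the first free item and each
 * us_freelist[i].us_item names the next.  Illustratively, a fresh 4-item
 * slab would start as us_firstfree = 0 with the chain 0 -> 1 -> 2 -> 3;
 * allocating one item advances us_firstfree to 1.
 */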
2039
2040static int
2041uma_zalloc_bucket(uma_zone_t zone, int flags)
2042{
2043 uma_bucket_t bucket;
2044 uma_slab_t slab;
2045 int16_t saved;
2046 int max, origflags = flags;
2047
2048 /*
2049 * Try this zone's free list first so we don't allocate extra buckets.
2050 */
2051 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2052 KASSERT(bucket->ub_cnt == 0,
2053 ("uma_zalloc_bucket: Bucket on free list is not empty."));
2054 LIST_REMOVE(bucket, ub_link);
2055 } else {
2056 int bflags;
2057
2058 bflags = (flags & ~M_ZERO);
2059 if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2060 bflags |= M_NOVM;
2061
2062 ZONE_UNLOCK(zone);
2063 bucket = bucket_alloc(zone->uz_count, bflags);
2064 ZONE_LOCK(zone);
2065 }
2066
2067 if (bucket == NULL)
2068 return (0);
2069
2070#ifdef SMP
2071 /*
2072 * This code is here to limit the number of simultaneous bucket fills
2073 * for any given zone to the number of per cpu caches in this zone. This
2074 * is done so that we don't allocate more memory than we really need.
2075 */
2076 if (zone->uz_fills >= mp_ncpus)
2077 goto done;
2078
2079#endif
2080 zone->uz_fills++;
2081
2082 max = MIN(bucket->ub_entries, zone->uz_count);
2083 /* Try to keep the buckets totally full */
2084 saved = bucket->ub_cnt;
2085 while (bucket->ub_cnt < max &&
2086 (slab = uma_zone_slab(zone, flags)) != NULL) {
2087 while (slab->us_freecount && bucket->ub_cnt < max) {
2088 bucket->ub_bucket[bucket->ub_cnt++] =
2089 uma_slab_alloc(zone, slab);
2090 }
2091
2092 /* Don't block on the next fill */
2093 flags |= M_NOWAIT;
2094 }
2095
2096 /*
2097 * We unlock here because we need to call the zone's init.
2098 * It should be safe to unlock because the slab dealt with
2099 * above is already on the appropriate list within the keg
2100 * and the bucket we filled is not yet on any list, so we
2101 * own it.
2102 */
2103 if (zone->uz_init != NULL) {
2104 int i;
2105
2106 ZONE_UNLOCK(zone);
2107 for (i = saved; i < bucket->ub_cnt; i++)
2108 if (zone->uz_init(bucket->ub_bucket[i],
2109 zone->uz_keg->uk_size, origflags) != 0)
2110 break;
2111 /*
2112 * If we couldn't initialize the whole bucket, put the
2113 * rest back onto the freelist.
2114 */
2115 if (i != bucket->ub_cnt) {
2116 int j;
2117
2118 for (j = i; j < bucket->ub_cnt; j++) {
2119 uma_zfree_internal(zone, bucket->ub_bucket[j],
2120 NULL, SKIP_FINI);
2121#ifdef INVARIANTS
2122 bucket->ub_bucket[j] = NULL;
2123#endif
2124 }
2125 bucket->ub_cnt = i;
2126 }
2127 ZONE_LOCK(zone);
2128 }
2129
2130 zone->uz_fills--;
2131 if (bucket->ub_cnt != 0) {
2132 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2133 bucket, ub_link);
2134 return (1);
2135 }
2136#ifdef SMP
2137done:
2138#endif
2139 bucket_free(bucket);
2140
2141 return (0);
2142}
2143/*
2144 * Allocates an item for an internal zone
2145 *
2146 * Arguments
2147 * zone The zone to alloc for.
2148 * udata The data to be passed to the constructor.
2149 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2150 *
2151 * Returns
2152 * NULL if there is no memory and M_NOWAIT is set
2153 * An item if successful
2154 */
2155
2156static void *
2157uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
2158{
2159 uma_keg_t keg;
2160 uma_slab_t slab;
2161 void *item;
2162
2163 item = NULL;
2164 keg = zone->uz_keg;
2165
2166#ifdef UMA_DEBUG_ALLOC
2167 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2168#endif
2169 ZONE_LOCK(zone);
2170
2171 slab = uma_zone_slab(zone, flags);
2172 if (slab == NULL) {
2173 ZONE_UNLOCK(zone);
2174 return (NULL);
2175 }
2176
2177 item = uma_slab_alloc(zone, slab);
2178
2179 ZONE_UNLOCK(zone);
2180
2181 /*
2182 * We have to call both the zone's init (not the keg's init)
2183 * and the zone's ctor. This is because the item is going from
2184 * a keg slab directly to the user, and the user is expecting it
2185 * to be both zone-init'd as well as zone-ctor'd.
2186 */
2187 if (zone->uz_init != NULL) {
2188 if (zone->uz_init(item, keg->uk_size, flags) != 0) {
2189 uma_zfree_internal(zone, item, udata, SKIP_FINI);
2190 return (NULL);
2191 }
2192 }
2193 if (zone->uz_ctor != NULL) {
2194 if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
2195 uma_zfree_internal(zone, item, udata, SKIP_DTOR);
2196 return (NULL);
2197 }
2198 }
2199 if (flags & M_ZERO)
2200 bzero(item, keg->uk_size);
2201
2202 return (item);
2203}
2204
2205/* See uma.h */
2206void
2207uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2208{
2209 uma_keg_t keg;
2210 uma_cache_t cache;
2211 uma_bucket_t bucket;
2212 int bflags;
2213 int cpu;
2214 enum zfreeskip skip;
2215
2216 /* This is the fast path free */
2217 skip = SKIP_NONE;
2218 keg = zone->uz_keg;
2219
2220#ifdef UMA_DEBUG_ALLOC_1
2221 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2222#endif
2223 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2224 zone->uz_name);
2225
2226 /*
2227 * The race here is acceptable. If we miss it we'll just have to wait
2228 * a little longer for the limits to be reset.
2229 */
2230
2231 if (keg->uk_flags & UMA_ZFLAG_FULL)
2232 goto zfree_internal;
2233
2234 if (zone->uz_dtor) {
2235 zone->uz_dtor(item, keg->uk_size, udata);
2236 skip = SKIP_DTOR;
2237 }
2238
2239zfree_restart:
2240 cpu = PCPU_GET(cpuid);
2241 CPU_LOCK(cpu);
2242 cache = &zone->uz_cpu[cpu];
2243
2244zfree_start:
2245 bucket = cache->uc_freebucket;
2246
2247 if (bucket) {
2248 /*
2249 * Do we have room in our bucket? It is OK for this uz count
2250 * check to be slightly out of sync.
2251 */
2252
2253 if (bucket->ub_cnt < bucket->ub_entries) {
2254 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2255 ("uma_zfree: Freeing to non free bucket index."));
2256 bucket->ub_bucket[bucket->ub_cnt] = item;
2257 bucket->ub_cnt++;
2258#ifdef INVARIANTS
2259 ZONE_LOCK(zone);
2260 if (keg->uk_flags & UMA_ZONE_MALLOC)
2261 uma_dbg_free(zone, udata, item);
2262 else
2263 uma_dbg_free(zone, NULL, item);
2264 ZONE_UNLOCK(zone);
2265#endif
2266 CPU_UNLOCK(cpu);
2267 return;
2268 } else if (cache->uc_allocbucket) {
2269#ifdef UMA_DEBUG_ALLOC
2270 printf("uma_zfree: Swapping buckets.\n");
2271#endif
2272 /*
2273 * We have run out of space in our freebucket.
2274 * See if we can switch with our alloc bucket.
2275 */
2276 if (cache->uc_allocbucket->ub_cnt <
2277 cache->uc_freebucket->ub_cnt) {
2278 bucket = cache->uc_freebucket;
2279 cache->uc_freebucket = cache->uc_allocbucket;
2280 cache->uc_allocbucket = bucket;
2281 goto zfree_start;
2282 }
2283 }
2284 }
2285 /*
2286 * We can get here for two reasons:
2287 *
2288 * 1) The buckets are NULL
2289 * 2) The alloc and free buckets are both somewhat full.
2290 */
2291
2292 ZONE_LOCK(zone);
2293
2294 bucket = cache->uc_freebucket;
2295 cache->uc_freebucket = NULL;
2296
2297 /* Can we throw this on the zone full list? */
2298 if (bucket != NULL) {
2299#ifdef UMA_DEBUG_ALLOC
2300 printf("uma_zfree: Putting old bucket on the free list.\n");
2301#endif
2302 /* ub_cnt is pointing to the last free item */
2303 KASSERT(bucket->ub_cnt != 0,
2304 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2305 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2306 bucket, ub_link);
2307 }
2308 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2309 LIST_REMOVE(bucket, ub_link);
2310 ZONE_UNLOCK(zone);
2311 cache->uc_freebucket = bucket;
2312 goto zfree_start;
2313 }
2314 /* We're done with this CPU now */
2315 CPU_UNLOCK(cpu);
2316
2317 /* And the zone.. */
2318 ZONE_UNLOCK(zone);
2319
2320#ifdef UMA_DEBUG_ALLOC
2321 printf("uma_zfree: Allocating new free bucket.\n");
2322#endif
2323 bflags = M_NOWAIT;
2324
2325 if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2326 bflags |= M_NOVM;
2327 bucket = bucket_alloc(zone->uz_count, bflags);
2328 if (bucket) {
2329 ZONE_LOCK(zone);
2330 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2331 bucket, ub_link);
2332 ZONE_UNLOCK(zone);
2333 goto zfree_restart;
2334 }
2335
2336 /*
2337 * If nothing else caught this, we'll just do an internal free.
2338 */
2339
2340zfree_internal:
2341
2342#ifdef INVARIANTS
2343 /*
2344 * If we need to skip the dtor and the uma_dbg_free in
2345 * uma_zfree_internal because we've already called the dtor
2346 * above, but we ended up here, then we need to make sure
2347 * that we take care of the uma_dbg_free immediately.
2348 */
2349	if (skip != SKIP_NONE) {
2350 ZONE_LOCK(zone);
2351 if (keg->uk_flags & UMA_ZONE_MALLOC)
2352 uma_dbg_free(zone, udata, item);
2353 else
2354 uma_dbg_free(zone, NULL, item);
2355 ZONE_UNLOCK(zone);
2356 }
2357#endif
2358 uma_zfree_internal(zone, item, udata, skip);
2359
2360 return;
2361}
2362
2363/*
2364 * Frees an item to an INTERNAL zone or allocates a free bucket
2365 *
2366 * Arguments:
2367 * zone The zone to free to
2368 * item The item we're freeing
2369 * udata User supplied data for the dtor
2370 * skip Skip dtors and finis
2371 */
2372static void
2373uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
2374 enum zfreeskip skip)
2375{
2376 uma_slab_t slab;
2377 uma_slabrefcnt_t slabref;
2378 uma_keg_t keg;
2379 u_int8_t *mem;
2380 u_int8_t freei;
2381
2382 keg = zone->uz_keg;
2383
2384 if (skip < SKIP_DTOR && zone->uz_dtor)
2385 zone->uz_dtor(item, keg->uk_size, udata);
2386 if (skip < SKIP_FINI && zone->uz_fini)
2387 zone->uz_fini(item, keg->uk_size);
2388
2389 ZONE_LOCK(zone);
2390
2391 if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
2392 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2393 if (keg->uk_flags & UMA_ZONE_HASH)
2394 slab = hash_sfind(&keg->uk_hash, mem);
2395 else {
2396 mem += keg->uk_pgoff;
2397 slab = (uma_slab_t)mem;
2398 }
2399 } else {
2400 slab = (uma_slab_t)udata;
2401 }
2402
2403 /* Do we need to remove from any lists? */
2404 if (slab->us_freecount+1 == keg->uk_ipers) {
2405 LIST_REMOVE(slab, us_link);
2406 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2407 } else if (slab->us_freecount == 0) {
2408 LIST_REMOVE(slab, us_link);
2409 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2410 }
2411
2412 /* Slab management stuff */
2413 freei = ((unsigned long)item - (unsigned long)slab->us_data)
2414 / keg->uk_rsize;
2415
2416#ifdef INVARIANTS
2417	if (skip == SKIP_NONE)
2418 uma_dbg_free(zone, slab, item);
2419#endif
2420
2421 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2422 slabref = (uma_slabrefcnt_t)slab;
2423 slabref->us_freelist[freei].us_item = slab->us_firstfree;
2424 } else {
2425 slab->us_freelist[freei].us_item = slab->us_firstfree;
2426 }
2427 slab->us_firstfree = freei;
2428 slab->us_freecount++;
2429
2430 /* Zone statistics */
2431 keg->uk_free++;
2432
2433 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2434 if (keg->uk_pages < keg->uk_maxpages)
2435 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2436
2437 /* We can handle one more allocation */
2438 wakeup_one(keg);
2439 }
2440
2441 ZONE_UNLOCK(zone);
2442}
2443
2444/* See uma.h */
2445void
2446uma_zone_set_max(uma_zone_t zone, int nitems)
2447{
2448 uma_keg_t keg;
2449
2450 keg = zone->uz_keg;
2451 ZONE_LOCK(zone);
2452 if (keg->uk_ppera > 1)
2453 keg->uk_maxpages = nitems * keg->uk_ppera;
2454 else
2455 keg->uk_maxpages = nitems / keg->uk_ipers;
2456
2457 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2458 keg->uk_maxpages++;
2459
2460 ZONE_UNLOCK(zone);
2461}
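
/*
 * Example of the rounding above (hypothetical numbers): with
 * uk_ppera = 1, uk_ipers = 10 and nitems = 1001, uk_maxpages starts at
 * 1001 / 10 = 100; since 100 * 10 < 1001 it is bumped to 101, so the
 * effective cap is 101 * 10 = 1010 items.  Limits always round up to
 * whole slabs.
 */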
2462
2463/* See uma.h */
2464void
2465uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2466{
2467 ZONE_LOCK(zone);
2468 KASSERT(zone->uz_keg->uk_pages == 0,
2469 ("uma_zone_set_init on non-empty keg"));
2470 zone->uz_keg->uk_init = uminit;
2471 ZONE_UNLOCK(zone);
2472}
2473
2474/* See uma.h */
2475void
2476uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2477{
2478 ZONE_LOCK(zone);
2479 KASSERT(zone->uz_keg->uk_pages == 0,
2480 ("uma_zone_set_fini on non-empty keg"));
2481 zone->uz_keg->uk_fini = fini;
2482 ZONE_UNLOCK(zone);
2483}
2484
2485/* See uma.h */
2486void
2487uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2488{
2489 ZONE_LOCK(zone);
2490 KASSERT(zone->uz_keg->uk_pages == 0,
2491 ("uma_zone_set_zinit on non-empty keg"));
2492 zone->uz_init = zinit;
2493 ZONE_UNLOCK(zone);
2494}
2495
2496/* See uma.h */
2497void
2498uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2499{
2500 ZONE_LOCK(zone);
2501 KASSERT(zone->uz_keg->uk_pages == 0,
2502 ("uma_zone_set_zfini on non-empty keg"));
2503 zone->uz_fini = zfini;
2504 ZONE_UNLOCK(zone);
2505}
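
/*
 * Example (hypothetical names): the four setters above distinguish
 * per-keg from per-zone hooks.  A secondary zone typically installs
 * zone-level hooks so the shared keg keeps the master's uminit/fini:
 *
 *	uma_zone_set_zinit(cooked_zone, cook_zinit);
 *	uma_zone_set_zfini(cooked_zone, cook_zfini);
 *
 * All four may only be changed before the keg has allocated any pages,
 * as the KASSERTs above enforce.
 */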
2506
2507/* See uma.h */
2508/* XXX uk_freef is not actually used with the zone locked */
2509void
2510uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2511{
2512 ZONE_LOCK(zone);
2513 zone->uz_keg->uk_freef = freef;
2514 ZONE_UNLOCK(zone);
2515}
2516
2517/* See uma.h */
2518/* XXX uk_allocf is not actually used with the zone locked */
2519void
2520uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2521{
2522 ZONE_LOCK(zone);
2523 zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2524 zone->uz_keg->uk_allocf = allocf;
2525 ZONE_UNLOCK(zone);
2526}
2527
2528/* See uma.h */
2529int
2530uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2531{
2532 uma_keg_t keg;
2533 vm_offset_t kva;
2534 int pages;
2535
2536 keg = zone->uz_keg;
2537 pages = count / keg->uk_ipers;
2538
2539 if (pages * keg->uk_ipers < count)
2540 pages++;
2541
2542 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2543
2544 if (kva == 0)
2545 return (0);
2546 if (obj == NULL) {
2547 obj = vm_object_allocate(OBJT_DEFAULT,
2548 pages);
2549 } else {
2550 VM_OBJECT_LOCK_INIT(obj, "uma object");
2551 _vm_object_allocate(OBJT_DEFAULT,
2552 pages, obj);
2553 }
2554 ZONE_LOCK(zone);
2555 keg->uk_kva = kva;
2556 keg->uk_obj = obj;
2557 keg->uk_maxpages = pages;
2558 keg->uk_allocf = obj_alloc;
2559 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2560 ZONE_UNLOCK(zone);
2561 return (1);
2562}
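
/*
 * Example (a sketch; names hypothetical): backing a zone with a
 * preallocated kernel object so that at most "count" items can ever
 * exist, all carved from one contiguous KVA range:
 *
 *	static struct vm_object thing_obj;
 *
 *	if (uma_zone_set_obj(thing_zone, &thing_obj, 1000) == 0)
 *		panic("thing_zone: could not reserve KVA");
 */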
2563
2564/* See uma.h */
2565void
2566uma_prealloc(uma_zone_t zone, int items)
2567{
2568 int slabs;
2569 uma_slab_t slab;
2570 uma_keg_t keg;
2571
2572 keg = zone->uz_keg;
2573 ZONE_LOCK(zone);
2574 slabs = items / keg->uk_ipers;
2575 if (slabs * keg->uk_ipers < items)
2576 slabs++;
2577 while (slabs > 0) {
2578 slab = slab_zalloc(zone, M_WAITOK);
2579 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2580 slabs--;
2581 }
2582 ZONE_UNLOCK(zone);
2583}
2584
2585/* See uma.h */
2586u_int32_t *
2587uma_find_refcnt(uma_zone_t zone, void *item)
2588{
2589 uma_slabrefcnt_t slabref;
2590 uma_keg_t keg;
2591 u_int32_t *refcnt;
2592 int idx;
2593
2594 keg = zone->uz_keg;
2595 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
2596 (~UMA_SLAB_MASK));
2597 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
2598 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2599 idx = ((unsigned long)item - (unsigned long)slabref->us_data)
2600 / keg->uk_rsize;
2601 refcnt = &slabref->us_freelist[idx].us_refcnt;
2602	return (refcnt);
2603}
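
/*
 * Example (a sketch; names hypothetical): a UMA_ZONE_REFCNT zone keeps a
 * 32-bit counter per item in the slab's free list entries, which callers
 * locate through this function:
 *
 *	ref_zone = uma_zcreate("refbufs", size, NULL, NULL, NULL, NULL,
 *	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
 *	p = uma_zalloc(ref_zone, M_WAITOK);
 *	refcnt = uma_find_refcnt(ref_zone, p);
 *	*refcnt = 1;
 */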
2604
2605/* See uma.h */
2606void
2607uma_reclaim(void)
2608{
2609#ifdef UMA_DEBUG
2610 printf("UMA: vm asked us to release pages!\n");
2611#endif
2612 bucket_enable();
2613 zone_foreach(zone_drain);
2614 /*
2615	 * Some slab headers may have been freed back to this zone after it
2616	 * was visited early in the pass above; visit it again so that pages
2617	 * made empty by draining the other zones can be freed.  Likewise for buckets.
2618 */
2619 zone_drain(slabzone);
2620 zone_drain(slabrefzone);
2621 bucket_zone_drain();
2622}
2623
2624void *
2625uma_large_malloc(int size, int wait)
2626{
2627 void *mem;
2628 uma_slab_t slab;
2629 u_int8_t flags;
2630
2631 slab = uma_zalloc_internal(slabzone, NULL, wait);
2632 if (slab == NULL)
2633 return (NULL);
2634 mem = page_alloc(NULL, size, &flags, wait);
2635 if (mem) {
2636 vsetslab((vm_offset_t)mem, slab);
2637 slab->us_data = mem;
2638 slab->us_flags = flags | UMA_SLAB_MALLOC;
2639 slab->us_size = size;
2640 } else {
2641		uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
2642 }
2643
2644 return (mem);
2645}
2646
2647void
2648uma_large_free(uma_slab_t slab)
2649{
2650 vsetobj((vm_offset_t)slab->us_data, kmem_object);
2651 page_free(slab->us_data, slab->us_size, slab->us_flags);
2652	uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
2653}
2654
2655void
2656uma_print_stats(void)
2657{
2658 zone_foreach(uma_print_zone);
2659}
2660
2661static void
2662slab_print(uma_slab_t slab)
2663{
2664 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
2665 slab->us_keg, slab->us_data, slab->us_freecount,
2666 slab->us_firstfree);
2667}
2668
2669static void
2670cache_print(uma_cache_t cache)
2671{
2672 printf("alloc: %p(%d), free: %p(%d)\n",
2673 cache->uc_allocbucket,
2674 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
2675 cache->uc_freebucket,
2676 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
2677}
2678
2679void
2680uma_print_zone(uma_zone_t zone)
2681{
2682 uma_cache_t cache;
2683 uma_keg_t keg;
2684 uma_slab_t slab;
2685 int i;
2686
2687 keg = zone->uz_keg;
2688 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2689 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2690 keg->uk_ipers, keg->uk_ppera,
2691 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
2692 printf("Part slabs:\n");
2693 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
2694 slab_print(slab);
2695 printf("Free slabs:\n");
2696 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
2697 slab_print(slab);
2698 printf("Full slabs:\n");
2699 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
2700 slab_print(slab);
2701 for (i = 0; i <= mp_maxid; i++) {
2702 if (CPU_ABSENT(i))
2703 continue;
2704 cache = &zone->uz_cpu[i];
2705 printf("CPU %d Cache:\n", i);
2706 cache_print(cache);
2707 }
2708}
2709
2710/*
2711 * Sysctl handler for vm.zone
2712 *
2713 * stolen from vm_zone.c
2714 */
2715static int
2716sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2717{
2718 int error, len, cnt;
2719 const int linesize = 128; /* conservative */
2720 int totalfree;
2721 char *tmpbuf, *offset;
2722 uma_zone_t z;
2723 uma_keg_t zk;
2724 char *p;
2725 int cpu;
2726 int cachefree;
2727 uma_bucket_t bucket;
2728 uma_cache_t cache;
2729
2730 cnt = 0;
2731 mtx_lock(&uma_mtx);
2732 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2733 LIST_FOREACH(z, &zk->uk_zones, uz_link)
2734 cnt++;
2735 }
2736 mtx_unlock(&uma_mtx);
2737 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2738 M_TEMP, M_WAITOK);
2739 len = snprintf(tmpbuf, linesize,
2740 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
2741 if (cnt == 0)
2742 tmpbuf[len - 1] = '\0';
2743 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2744 if (error || cnt == 0)
2745 goto out;
2746 offset = tmpbuf;
2747 mtx_lock(&uma_mtx);
2748 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2749 LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2750 if (cnt == 0) /* list may have changed size */
2751 break;
2752 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2753 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2754 if (CPU_ABSENT(cpu))
2755 continue;
2756 CPU_LOCK(cpu);
2757 }
2758 }
2759 ZONE_LOCK(z);
2760 cachefree = 0;
2761 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2762 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2763 if (CPU_ABSENT(cpu))
2764 continue;
2765 cache = &z->uz_cpu[cpu];
2766 if (cache->uc_allocbucket != NULL)
2767 cachefree += cache->uc_allocbucket->ub_cnt;
2768 if (cache->uc_freebucket != NULL)
2769 cachefree += cache->uc_freebucket->ub_cnt;
2770 CPU_UNLOCK(cpu);
2771 }
2772 }
2773 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2774 cachefree += bucket->ub_cnt;
2775 }
2776 totalfree = zk->uk_free + cachefree;
2777 len = snprintf(offset, linesize,
2778 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2779 z->uz_name, zk->uk_size,
2780 zk->uk_maxpages * zk->uk_ipers,
2781 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2782 totalfree,
2783 (unsigned long long)z->uz_allocs);
2784 ZONE_UNLOCK(z);
2785 for (p = offset + 12; p > offset && *p == ' '; --p)
2786 /* nothing */ ;
2787 p[1] = ':';
2788 cnt--;
2789 offset += len;
2790 }
2791 }
2792 mtx_unlock(&uma_mtx);
2793 *offset++ = '\0';
2794 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2795out:
2796 FREE(tmpbuf, M_TEMP);
2797 return (error);
2798}