1/*
2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/vm/uma_core.c 95432 2002-04-25 06:24:40Z arr $
27 *
28 */
29
30/*
31 * uma_core.c Implementation of the Universal Memory allocator
32 *
33 * This allocator is intended to replace the multitude of similar object caches
34 * in the standard FreeBSD kernel. The intent is to be flexible as well as
35 * efficient. A primary design goal is to return unused memory to the rest of
36 * the system. This will make the system as a whole more flexible due to the
37 * ability to move memory to subsystems which most need it instead of leaving
38 * pools of reserved memory unused.
39 *
40 * The basic ideas stem from similar slab/zone based allocators whose algorithms
41 * are well known.
42 *
43 */
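/*
 * Illustrative usage sketch (not taken from this file; the item type and
 * zone name below are made up for the example).  A typical consumer creates
 * a zone once and then allocates and frees items through it using the
 * interfaces implemented later in this file:
 *
 *	static uma_zone_t foo_zone;
 *	struct foo *fp;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	fp = uma_zalloc_arg(foo_zone, NULL, M_WAITOK);
 *	...
 *	uma_zfree_arg(foo_zone, fp, NULL);
 *	uma_zdestroy(foo_zone);
 */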
44
45/*
46 * TODO:
47 * - Improve memory usage for large allocations
48 * - Improve INVARIANTS (0xdeadc0de write out)
49 * - Investigate cache size adjustments
50 */
51
52/* I should really use ktr.. */
53/*
54#define UMA_DEBUG 1
55#define UMA_DEBUG_ALLOC 1
56#define UMA_DEBUG_ALLOC_1 1
57*/
58
59
60#include "opt_param.h"
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/kernel.h>
64#include <sys/types.h>
65#include <sys/queue.h>
66#include <sys/malloc.h>
67#include <sys/lock.h>
68#include <sys/sysctl.h>
69#include <sys/mutex.h>
70#include <sys/smp.h>
71#include <sys/vmmeter.h>
72
73#include <machine/types.h>
74
75#include <vm/vm.h>
76#include <vm/vm_object.h>
77#include <vm/vm_page.h>
78#include <vm/vm_param.h>
79#include <vm/vm_map.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_extern.h>
82#include <vm/uma.h>
83#include <vm/uma_int.h>
84
85/*
86 * This is the zone from which all zones are spawned. The idea is that even
87 * the zone heads are allocated from the allocator, so we use the bss section
88 * to bootstrap us.
89 */
90static struct uma_zone masterzone;
91static uma_zone_t zones = &masterzone;
92
93/* This is the zone from which all of uma_slab_t's are allocated. */
94static uma_zone_t slabzone;
95
96/*
97 * The initial hash tables come out of this zone so they can be allocated
98 * prior to malloc coming up.
99 */
100static uma_zone_t hashzone;
101
102/*
103 * Zone that buckets come from.
104 */
105static uma_zone_t bucketzone;
106
107/*
108 * Are we allowed to allocate buckets?
109 */
110static int bucketdisable = 1;
111
112/* Linked list of all zones in the system */
113static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);
114
115/* This mutex protects the zone list */
116static struct mtx uma_mtx;
117
118/* Linked list of boot time pages */
119static LIST_HEAD(,uma_slab) uma_boot_pages =
120 LIST_HEAD_INITIALIZER(&uma_boot_pages);
121
122/* Count of free boottime pages */
123static int uma_boot_free = 0;
124
125/* Is the VM done starting up? */
126static int booted = 0;
127
128/* This is the handle used to schedule our working set calculator */
129static struct callout uma_callout;
130
131/* This is mp_maxid + 1, for use while looping over each cpu */
132static int maxcpu;
133
134/*
135 * This structure is passed as the zone ctor arg so that I don't have to create
136 * a special allocation function just for zones.
137 */
138struct uma_zctor_args {
139 char *name;
140 int size;
141 uma_ctor ctor;
142 uma_dtor dtor;
143 uma_init uminit;
144 uma_fini fini;
145 int align;
146 u_int16_t flags;
147};
148
149/*
150 * This is the malloc hash table which is used to find the zone that a
151 * malloc allocation came from. It is not currently resizeable. The
152 * memory for the actual hash bucket is allocated in kmeminit.
153 */
154struct uma_hash mhash;
155struct uma_hash *mallochash = &mhash;
156
157/* Prototypes.. */
158
159static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
160static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
161static void page_free(void *, int, u_int8_t);
162static uma_slab_t slab_zalloc(uma_zone_t, int);
163static void cache_drain(uma_zone_t);
164static void bucket_drain(uma_zone_t, uma_bucket_t);
165static void zone_drain(uma_zone_t);
166static void zone_ctor(void *, int, void *);
167static void zone_dtor(void *, int, void *);
168static void zero_init(void *, int);
169static void zone_small_init(uma_zone_t zone);
170static void zone_large_init(uma_zone_t zone);
171static void zone_foreach(void (*zfunc)(uma_zone_t));
172static void zone_timeout(uma_zone_t zone);
173static struct slabhead *hash_alloc(int *);
174static void hash_expand(struct uma_hash *, struct slabhead *, int);
175static void hash_free(struct slabhead *hash, int hashsize);
176static void uma_timeout(void *);
177static void uma_startup3(void);
178static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t);
179static void uma_zfree_internal(uma_zone_t, void *, void *, int);
180static void bucket_enable(void);
181void uma_print_zone(uma_zone_t);
182void uma_print_stats(void);
183static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
184
185SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
186 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
187SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
188
189/*
190 * This routine checks to see whether or not it's safe to enable buckets.
191 */
192
193static void
194bucket_enable(void)
195{
196 if (cnt.v_free_count < cnt.v_free_min)
197 bucketdisable = 1;
198 else
199 bucketdisable = 0;
200}
201
202
203/*
204 * Routine called by timeout which is used to fire off some time interval
205 * based calculations. (working set, stats, etc.)
206 *
207 * Arguments:
208 * arg Unused
209 *
210 * Returns:
211 * Nothing
212 */
213static void
214uma_timeout(void *unused)
215{
216 bucket_enable();
217 zone_foreach(zone_timeout);
218
219 /* Reschedule this event */
220 callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
221}
222
223/*
224 * Routine to perform timeout driven calculations. This does the working set
225 * calculation as well as hash expansion and per cpu statistics aggregation.
226 *
227 * Arguments:
228 * zone The zone to operate on
229 *
230 * Returns:
231 * Nothing
232 */
233static void
234zone_timeout(uma_zone_t zone)
235{
236 uma_cache_t cache;
237 u_int64_t alloc;
238 int free;
239 int cpu;
240
241 alloc = 0;
242 free = 0;
243
244 /*
245 * Aggregate per cpu cache statistics back to the zone.
246 *
247 * I may rewrite this to set a flag in the per cpu cache instead of
248 * locking. If the flag is not cleared on the next round I will have
249 * to lock and do it here instead so that the statistics don't get too
250 * far out of sync.
251 */
252 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
253 for (cpu = 0; cpu < maxcpu; cpu++) {
254 if (CPU_ABSENT(cpu))
255 continue;
256 CPU_LOCK(zone, cpu);
257 cache = &zone->uz_cpu[cpu];
258 /* Add them up, and reset */
259 alloc += cache->uc_allocs;
260 cache->uc_allocs = 0;
261 if (cache->uc_allocbucket)
262 free += cache->uc_allocbucket->ub_ptr + 1;
263 if (cache->uc_freebucket)
264 free += cache->uc_freebucket->ub_ptr + 1;
265 CPU_UNLOCK(zone, cpu);
266 }
267 }
268
269 /* Now push these stats back into the zone.. */
270 ZONE_LOCK(zone);
271 zone->uz_allocs += alloc;
272
273 /*
274 * cachefree is an instantaneous snapshot of what is in the per cpu
275 * caches, not an accurate counter
276 */
277 zone->uz_cachefree = free;
278
279 /*
280 * Expand the zone hash table.
281 *
282 * This is done if the number of slabs is larger than the hash size.
283 * What I'm trying to do here is completely eliminate collisions. This
284 * may be a little aggressive. Should I allow for two collisions max?
285 */
286
287 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) &&
288 !(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
289 if (zone->uz_pages / zone->uz_ppera
290 >= zone->uz_hash.uh_hashsize) {
291 struct slabhead *newhash;
292 int newsize;
293
294 newsize = zone->uz_hash.uh_hashsize;
295 ZONE_UNLOCK(zone);
296 newhash = hash_alloc(&newsize);
297 ZONE_LOCK(zone);
298 hash_expand(&zone->uz_hash, newhash, newsize);
299 }
300 }
301
302 /*
303 * Here we compute the working set size as the total number of items
304 * left outstanding since the last time interval. This is slightly
305 * suboptimal. What we really want is the highest number of outstanding
306 * items during the last time quantum. This should be close enough.
307 *
308 * The working set size is used to throttle the zone_drain function.
309 * We don't want to return memory that we may need again immediately.
310 */
311 alloc = zone->uz_allocs - zone->uz_oallocs;
312 zone->uz_oallocs = zone->uz_allocs;
313 zone->uz_wssize = alloc;
314
315 ZONE_UNLOCK(zone);
316}
317
318/*
319 * Allocate and zero fill the next sized hash table from the appropriate
320 * backing store.
321 *
322 * Arguments:
323 * oldsize On input it's the size we're currently at and on output
324 * it is the expanded size.
325 *
326 * Returns:
327 * slabhead The new hash bucket or NULL if the allocation failed.
328 */
329struct slabhead *
330hash_alloc(int *oldsize)
331{
332 struct slabhead *newhash;
333 int newsize;
334 int alloc;
335
336 /* We're just going to go to a power of two greater */
337 if (*oldsize) {
338 newsize = (*oldsize) * 2;
339 alloc = sizeof(newhash[0]) * newsize;
340 /* XXX Shouldn't be abusing DEVBUF here */
341 newhash = (struct slabhead *)malloc(alloc, M_DEVBUF, M_NOWAIT);
342 } else {
343 alloc = sizeof(newhash[0]) * UMA_HASH_SIZE_INIT;
344 newhash = uma_zalloc_internal(hashzone, NULL, M_WAITOK, NULL);
345 newsize = UMA_HASH_SIZE_INIT;
346 }
347 if (newhash)
348 bzero(newhash, alloc);
349
350 *oldsize = newsize;
351
352 return (newhash);
353}
354
355/*
356 * Expands the hash table for OFFPAGE zones. This is done from zone_timeout
357 * to reduce collisions. This must not be done in the regular allocation path;
358 * otherwise we can recurse on the vm while allocating pages.
359 *
360 * Arguments:
361 * hash The hash you want to expand by a factor of two.
362 *
363 * Returns:
364 * Nothing
365 *
366 * Discussion:
367 */
368static void
369hash_expand(struct uma_hash *hash, struct slabhead *newhash, int newsize)
370{
371 struct slabhead *oldhash;
372 uma_slab_t slab;
373 int oldsize;
374 int hval;
375 int i;
376
377 if (!newhash)
378 return;
379
380 oldsize = hash->uh_hashsize;
381 oldhash = hash->uh_slab_hash;
382
383 if (oldsize >= newsize) {
384 hash_free(newhash, newsize);
385 return;
386 }
387
388 hash->uh_hashmask = newsize - 1;
389
390 /*
391 * I need to investigate hash algorithms for resizing without a
392 * full rehash.
393 */
394
395 for (i = 0; i < oldsize; i++)
396 while (!SLIST_EMPTY(&hash->uh_slab_hash[i])) {
397 slab = SLIST_FIRST(&hash->uh_slab_hash[i]);
398 SLIST_REMOVE_HEAD(&hash->uh_slab_hash[i], us_hlink);
399 hval = UMA_HASH(hash, slab->us_data);
400 SLIST_INSERT_HEAD(&newhash[hval], slab, us_hlink);
401 }
402
403 if (oldhash)
404 hash_free(oldhash, oldsize);
405
406 hash->uh_slab_hash = newhash;
407 hash->uh_hashsize = newsize;
408
409 return;
410}
411
412/*
413 * Free the hash bucket to the appropriate backing store.
414 *
415 * Arguments:
416 * slab_hash The hash bucket we're freeing
417 * hashsize The number of entries in that hash bucket
418 *
419 * Returns:
420 * Nothing
421 */
422static void
423hash_free(struct slabhead *slab_hash, int hashsize)
424{
425 if (hashsize == UMA_HASH_SIZE_INIT)
426 uma_zfree_internal(hashzone,
427 slab_hash, NULL, 0);
428 else
429 free(slab_hash, M_DEVBUF);
430}
431
432/*
433 * Frees all outstanding items in a bucket
434 *
435 * Arguments:
436 * zone The zone to free to, must be unlocked.
437 * bucket The free/alloc bucket with items, cpu queue must be locked.
438 *
439 * Returns:
440 * Nothing
441 */
442
443static void
444bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
445{
446 uma_slab_t slab;
447 int mzone;
448 void *item;
449
450 if (bucket == NULL)
451 return;
452
453 slab = NULL;
454 mzone = 0;
455
456 /* We have to look up the slab again for malloc. */
457 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
458 mzone = 1;
459
460 while (bucket->ub_ptr > -1) {
461 item = bucket->ub_bucket[bucket->ub_ptr];
462#ifdef INVARIANTS
463 bucket->ub_bucket[bucket->ub_ptr] = NULL;
464 KASSERT(item != NULL,
465 ("bucket_drain: botched ptr, item is NULL"));
466#endif
467 bucket->ub_ptr--;
468 /*
469 * This is extremely inefficient. The slab pointer was passed
470 * to uma_zfree_arg, but we lost it because the buckets don't
471 * hold them. This will go away when free() gets a size passed
472 * to it.
473 */
474 if (mzone)
475 slab = hash_sfind(mallochash,
476 (u_int8_t *)((unsigned long)item &
477 (~UMA_SLAB_MASK)));
478 uma_zfree_internal(zone, item, slab, 1);
479 }
480}
481
482/*
483 * Drains the per cpu caches for a zone.
484 *
485 * Arguments:
486 * zone The zone to drain, must be unlocked.
487 *
488 * Returns:
489 * Nothing
490 *
491 * This function returns with the zone locked so that the per cpu queues can
492 * not be filled until zone_drain is finished.
493 *
494 */
495static void
496cache_drain(uma_zone_t zone)
497{
498 uma_bucket_t bucket;
499 uma_cache_t cache;
500 int cpu;
501
502 /*
503 * Flush out the per cpu queues.
504 *
505 * XXX This causes unnecessary thrashing due to immediately having
506 * empty per cpu queues. I need to improve this.
507 */
508
509 /*
510 * We have to lock each cpu cache before locking the zone
511 */
512 ZONE_UNLOCK(zone);
513
514 for (cpu = 0; cpu < maxcpu; cpu++) {
515 if (CPU_ABSENT(cpu))
516 continue;
517 CPU_LOCK(zone, cpu);
518 cache = &zone->uz_cpu[cpu];
519 bucket_drain(zone, cache->uc_allocbucket);
520 bucket_drain(zone, cache->uc_freebucket);
521 }
522
523 /*
524 * Drain the bucket queues and free the buckets, we just keep two per
525 * cpu (alloc/free).
526 */
527 ZONE_LOCK(zone);
528 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
529 LIST_REMOVE(bucket, ub_link);
530 ZONE_UNLOCK(zone);
531 bucket_drain(zone, bucket);
532 uma_zfree_internal(bucketzone, bucket, NULL, 0);
533 ZONE_LOCK(zone);
534 }
535
536 /* Now we do the free queue.. */
537 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
538 LIST_REMOVE(bucket, ub_link);
539 uma_zfree_internal(bucketzone, bucket, NULL, 0);
540 }
541
542 /* We unlock here, but they will all block until the zone is unlocked */
543 for (cpu = 0; cpu < maxcpu; cpu++) {
544 if (CPU_ABSENT(cpu))
545 continue;
546 CPU_UNLOCK(zone, cpu);
547 }
548
549 zone->uz_cachefree = 0;
550}
551
552/*
553 * Frees pages from a zone back to the system. This is done on demand from
554 * the pageout daemon.
555 *
556 * Arguments:
557 * zone The zone to free pages from
559 *
560 * Returns:
561 * Nothing.
562 */
563static void
564zone_drain(uma_zone_t zone)
565{
566 uma_slab_t slab;
567 uma_slab_t n;
568 u_int64_t extra;
569 u_int8_t flags;
570 u_int8_t *mem;
571 int i;
572
573 /*
574 * We don't want to take pages from statically allocated zones at this
575 * time
576 */
577 if (zone->uz_flags & UMA_ZFLAG_NOFREE || zone->uz_freef == NULL)
578 return;
579
580 ZONE_LOCK(zone);
581
582 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
583 cache_drain(zone);
584
585 if (zone->uz_free < zone->uz_wssize)
586 goto finished;
587#ifdef UMA_DEBUG
588 printf("%s working set size: %llu free items: %u\n",
589 zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
590#endif
591 extra = zone->uz_free - zone->uz_wssize;
592 extra /= zone->uz_ipers;
593
594 /* extra is now the number of extra slabs that we can free */
595
596 if (extra == 0)
597 goto finished;
598
599 slab = LIST_FIRST(&zone->uz_free_slab);
600 while (slab && extra) {
601 n = LIST_NEXT(slab, us_link);
602
603 /* We have nowhere to free these to */
604 if (slab->us_flags & UMA_SLAB_BOOT) {
605 slab = n;
606 continue;
607 }
608
609 LIST_REMOVE(slab, us_link);
610 zone->uz_pages -= zone->uz_ppera;
611 zone->uz_free -= zone->uz_ipers;
612 if (zone->uz_fini)
613 for (i = 0; i < zone->uz_ipers; i++)
614 zone->uz_fini(
615 slab->us_data + (zone->uz_rsize * i),
616 zone->uz_size);
617 flags = slab->us_flags;
618 mem = slab->us_data;
619 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
620 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
621 UMA_HASH_REMOVE(mallochash,
622 slab, slab->us_data);
623 } else {
624 UMA_HASH_REMOVE(&zone->uz_hash,
625 slab, slab->us_data);
626 }
627 uma_zfree_internal(slabzone, slab, NULL, 0);
628 } else if (zone->uz_flags & UMA_ZFLAG_MALLOC)
629 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
630#ifdef UMA_DEBUG
631 printf("%s: Returning %d bytes.\n",
632 zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
633#endif
634 zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
635
636 slab = n;
637 extra--;
638 }
639
640finished:
641 ZONE_UNLOCK(zone);
642}
643
644/*
645 * Allocate a new slab for a zone. This does not insert the slab onto a list.
646 *
647 * Arguments:
648 * zone The zone to allocate slabs for
649 * wait Shall we wait?
650 *
651 * Returns:
652 * The slab that was allocated or NULL if there is no memory and the
653 * caller specified M_NOWAIT.
654 *
655 */
656static uma_slab_t
657slab_zalloc(uma_zone_t zone, int wait)
658{
659 uma_slab_t slab; /* Starting slab */
660 u_int8_t *mem;
661 u_int8_t flags;
662 int i;
663
664 slab = NULL;
665
666#ifdef UMA_DEBUG
667 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
668#endif
669 ZONE_UNLOCK(zone);
670
671 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
672 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
673 if (slab == NULL) {
674 ZONE_LOCK(zone);
675 return NULL;
676 }
677 }
678
679 if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
680 mtx_lock(&Giant);
681 mem = zone->uz_allocf(zone,
682 zone->uz_ppera * UMA_SLAB_SIZE, &flags, wait);
683 mtx_unlock(&Giant);
684 if (mem == NULL) {
685 ZONE_LOCK(zone);
686 return (NULL);
687 }
688 } else {
689 uma_slab_t tmps;
690
691 if (zone->uz_ppera > 1)
692 panic("UMA: Attempting to allocate multiple pages before vm has started.\n");
693 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
694 panic("Mallocing before uma_startup2 has been called.\n");
695 if (uma_boot_free == 0)
696 panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
697 tmps = LIST_FIRST(&uma_boot_pages);
698 LIST_REMOVE(tmps, us_link);
699 uma_boot_free--;
700 mem = tmps->us_data;
701 }
702
703 ZONE_LOCK(zone);
704
705 /* Alloc slab structure for offpage, otherwise adjust its position */
706 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
707 slab = (uma_slab_t )(mem + zone->uz_pgoff);
708 } else {
709 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC))
710 UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
711 }
712 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
713#ifdef UMA_DEBUG
714 printf("Inserting %p into malloc hash from slab %p\n",
715 mem, slab);
716#endif
717 /* XXX Yikes! No lock on the malloc hash! */
718 UMA_HASH_INSERT(mallochash, slab, mem);
719 }
720
721 slab->us_zone = zone;
722 slab->us_data = mem;
723
724 /*
725 * This is intended to spread data out across cache lines.
726 *
727 * This code doesn't seem to work properly on x86, and on alpha
728 * it makes absolutely no performance difference. I'm sure it could
729 * use some tuning, but Sun makes outrageous claims about its
730 * performance.
731 */
732#if 0
733 if (zone->uz_cachemax) {
734 slab->us_data += zone->uz_cacheoff;
735 zone->uz_cacheoff += UMA_CACHE_INC;
736 if (zone->uz_cacheoff > zone->uz_cachemax)
737 zone->uz_cacheoff = 0;
738 }
739#endif
740
741 slab->us_freecount = zone->uz_ipers;
742 slab->us_firstfree = 0;
743 slab->us_flags = flags;
744 for (i = 0; i < zone->uz_ipers; i++)
745 slab->us_freelist[i] = i+1;
746
747 if (zone->uz_init)
748 for (i = 0; i < zone->uz_ipers; i++)
749 zone->uz_init(slab->us_data + (zone->uz_rsize * i),
750 zone->uz_size);
751
752 zone->uz_pages += zone->uz_ppera;
753 zone->uz_free += zone->uz_ipers;
754
755 return (slab);
756}
757
758/*
759 * Allocates a number of pages from the system
760 *
761 * Arguments:
762 * zone Unused
763 * bytes The number of bytes requested
764 * wait Shall we wait?
765 *
766 * Returns:
767 * A pointer to the alloced memory or possibly
768 * NULL if M_NOWAIT is set.
769 */
770static void *
771page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
772{
773 void *p; /* Returned page */
774
775 /*
776 * XXX The original zone allocator did this, but I don't think it's
777 * necessary in current.
778 */
779
780 if (lockstatus(&kernel_map->lock, NULL)) {
781 *pflag = UMA_SLAB_KMEM;
782 p = (void *) kmem_malloc(kmem_map, bytes, wait);
783 } else {
784 *pflag = UMA_SLAB_KMAP;
785 p = (void *) kmem_alloc(kernel_map, bytes);
786 }
787
788 return (p);
789}
790
791/*
792 * Allocates a number of pages from within an object
793 *
794 * Arguments:
795 * zone Unused
796 * bytes The number of bytes requested
797 * wait Shall we wait?
798 *
799 * Returns:
800 * A pointer to the alloced memory or possibly
801 * NULL if M_NOWAIT is set.
802 */
803static void *
804obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
805{
806 vm_offset_t zkva;
807 vm_offset_t retkva;
808 vm_page_t p;
809 int pages;
810
811 retkva = NULL;
812 pages = zone->uz_pages;
813
814 /*
815 * This looks a little weird since we're getting one page at a time
816 */
817 while (bytes > 0) {
818 p = vm_page_alloc(zone->uz_obj, pages,
819 VM_ALLOC_INTERRUPT);
820 if (p == NULL)
821 return (NULL);
822
823 zkva = zone->uz_kva + pages * PAGE_SIZE;
824 if (retkva == NULL)
825 retkva = zkva;
826 pmap_qenter(zkva, &p, 1);
827 bytes -= PAGE_SIZE;
828 pages += 1;
829 }
830
831 *flags = UMA_SLAB_PRIV;
832
833 return ((void *)retkva);
834}
835
836/*
837 * Frees a number of pages to the system
838 *
839 * Arguments:
840 * mem A pointer to the memory to be freed
841 * size The size of the memory being freed
842 * flags The original p->us_flags field
843 *
844 * Returns:
845 * Nothing
846 *
847 */
848static void
849page_free(void *mem, int size, u_int8_t flags)
850{
851 vm_map_t map;
852 if (flags & UMA_SLAB_KMEM)
853 map = kmem_map;
854 else if (flags & UMA_SLAB_KMAP)
855 map = kernel_map;
856 else
857 panic("UMA: page_free used with invalid flags %d\n", flags);
858
859 kmem_free(map, (vm_offset_t)mem, size);
860}
861
862/*
863 * Zero fill initializer
864 *
865 * Arguments/Returns follow uma_init specifications
866 *
867 */
868static void
869zero_init(void *mem, int size)
870{
871 bzero(mem, size);
872}
873
874/*
875 * Finish creating a small uma zone. This calculates ipers, and the zone size.
876 *
877 * Arguments
878 * zone The zone we should initialize
879 *
880 * Returns
881 * Nothing
882 */
883static void
884zone_small_init(uma_zone_t zone)
885{
886 int rsize;
887 int memused;
888 int ipers;
889
890 rsize = zone->uz_size;
891
892 if (rsize < UMA_SMALLEST_UNIT)
893 rsize = UMA_SMALLEST_UNIT;
894
895 if (rsize & zone->uz_align)
896 rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);
897
898 zone->uz_rsize = rsize;
899
900 rsize += 1; /* Account for the byte of linkage */
901 zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
902 zone->uz_ppera = 1;
903
904 memused = zone->uz_ipers * zone->uz_rsize;
905
906 /* Can we do any better? */
907 if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
908 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
909 return;
910 ipers = UMA_SLAB_SIZE / zone->uz_rsize;
911 if (ipers > zone->uz_ipers) {
912 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
913 zone->uz_ipers = ipers;
914 }
915 }
916
917}
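/*
 * Worked example of the sizing above (illustrative only; the real numbers
 * depend on the platform's UMA_SLAB_SIZE and sizeof(struct uma_slab)).
 * Assume a 4096 byte slab, a 64 byte slab header, a 100 byte item and an
 * alignment mask of 7:
 *
 *	rsize     = (100 & ~7) + 8 = 104	rounded up to the alignment
 *	rsize + 1 = 105				one byte of freelist linkage
 *	ipers     = (4096 - 64) / 105 = 38	items per slab
 *	memused   = 38 * 104 = 3952		waste = 4096 - 3952 = 144
 *
 * If that waste reaches UMA_MAX_WASTE, the zone is considered for OFFPAGE
 * slab headers, where ipers would become 4096 / 104 = 39.
 */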
918
919/*
920 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do
921 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
922 * more complicated.
923 *
924 * Arguments
925 * zone The zone we should initialize
926 *
927 * Returns
928 * Nothing
929 */
930static void
931zone_large_init(uma_zone_t zone)
932{
933 int pages;
934
935 pages = zone->uz_size / UMA_SLAB_SIZE;
936
937 /* Account for remainder */
938 if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
939 pages++;
940
941 zone->uz_ppera = pages;
942 zone->uz_ipers = 1;
943
944 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
945 zone->uz_rsize = zone->uz_size;
946}
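/*
 * Illustrative numbers (again assuming a 4096 byte UMA_SLAB_SIZE): a zone
 * with a 9000 byte item gets pages = 9000 / 4096 = 2, bumped to 3 for the
 * remainder, so each slab spans three pages and holds exactly one item
 * (uz_ipers = 1, uz_rsize = 9000) with its slab header kept OFFPAGE.
 */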
947
948/*
949 * Zone header ctor. This initializes all fields, locks, etc. And inserts
950 * the zone onto the global zone list.
951 *
952 * Arguments/Returns follow uma_ctor specifications
953 * udata Actually uma_zcreat_args
954 *
955 */
956
957static void
958zone_ctor(void *mem, int size, void *udata)
959{
960 struct uma_zctor_args *arg = udata;
961 uma_zone_t zone = mem;
962 int cplen;
963 int cpu;
964
965 bzero(zone, size);
966 zone->uz_name = arg->name;
967 zone->uz_size = arg->size;
968 zone->uz_ctor = arg->ctor;
969 zone->uz_dtor = arg->dtor;
970 zone->uz_init = arg->uminit;
971 zone->uz_align = arg->align;
972 zone->uz_free = 0;
973 zone->uz_pages = 0;
974 zone->uz_flags = 0;
975 zone->uz_allocf = page_alloc;
976 zone->uz_freef = page_free;
977
978 if (arg->flags & UMA_ZONE_ZINIT)
979 zone->uz_init = zero_init;
980
981 if (arg->flags & UMA_ZONE_INTERNAL)
982 zone->uz_flags |= UMA_ZFLAG_INTERNAL;
983
984 if (arg->flags & UMA_ZONE_MALLOC)
985 zone->uz_flags |= UMA_ZFLAG_MALLOC;
986
987 if (arg->flags & UMA_ZONE_NOFREE)
988 zone->uz_flags |= UMA_ZFLAG_NOFREE;
989
990 if (zone->uz_size > UMA_SLAB_SIZE)
991 zone_large_init(zone);
992 else
993 zone_small_init(zone);
994
995 /* We do this so that the per cpu lock name is unique for each zone */
996 memcpy(zone->uz_lname, "PCPU ", 5);
997 cplen = min(strlen(zone->uz_name) + 1, LOCKNAME_LEN - 6);
998 memcpy(zone->uz_lname+5, zone->uz_name, cplen);
999 zone->uz_lname[LOCKNAME_LEN - 1] = '\0';
1000
1001 /*
1002 * If we're putting the slab header in the actual page we need to
1003 * figure out where in each page it goes. This calculates a right
1004 * justified offset into the memory on an ALIGN_PTR boundary.
1005 */
1006 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
1007 int totsize;
1008 int waste;
1009
1010 /* Size of the slab struct and free list */
1011 totsize = sizeof(struct uma_slab) + zone->uz_ipers;
1012 if (totsize & UMA_ALIGN_PTR)
1013 totsize = (totsize & ~UMA_ALIGN_PTR) +
1014 (UMA_ALIGN_PTR + 1);
1015 zone->uz_pgoff = UMA_SLAB_SIZE - totsize;
1016
1017 waste = zone->uz_pgoff;
1018 waste -= (zone->uz_ipers * zone->uz_rsize);
1019
1020 /*
1021 * This calculates how much space we have for cache line size
1022 * optimizations. It works by offsetting each slab slightly.
1023 * Currently it breaks on x86, and so it is disabled.
1024 */
1025
1026 if (zone->uz_align < UMA_CACHE_INC && waste > UMA_CACHE_INC) {
1027 zone->uz_cachemax = waste - UMA_CACHE_INC;
1028 zone->uz_cacheoff = 0;
1029 }
1030
1031 totsize = zone->uz_pgoff + sizeof(struct uma_slab)
1032 + zone->uz_ipers;
1033 /* I don't think it's possible, but I'll make sure anyway */
1034 if (totsize > UMA_SLAB_SIZE) {
1035 printf("zone %s ipers %d rsize %d size %d\n",
1036 zone->uz_name, zone->uz_ipers, zone->uz_rsize,
1037 zone->uz_size);
1038 panic("UMA slab won't fit.\n");
1039 }
1040 } else {
1041 struct slabhead *newhash;
1042 int hashsize;
1043
1044 hashsize = 0;
1045 newhash = hash_alloc(&hashsize);
1046 hash_expand(&zone->uz_hash, newhash, hashsize);
1047 zone->uz_pgoff = 0;
1048 }
1049
1050#ifdef UMA_DEBUG
1051 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1052 zone->uz_name, zone,
1053 zone->uz_size, zone->uz_ipers,
1054 zone->uz_ppera, zone->uz_pgoff);
1055#endif
1056 ZONE_LOCK_INIT(zone);
1057
1058 mtx_lock(&uma_mtx);
1059 LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
1060 mtx_unlock(&uma_mtx);
1061
1062 /*
1063 * Some internal zones don't have room allocated for the per cpu
1064 * caches. If we're internal, bail out here.
1065 */
1066
1067 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1068 return;
1069
1070 if (zone->uz_ipers < UMA_BUCKET_SIZE)
1071 zone->uz_count = zone->uz_ipers - 1;
1072 else
1073 zone->uz_count = UMA_BUCKET_SIZE - 1;
1074
1075 for (cpu = 0; cpu < maxcpu; cpu++)
1076 CPU_LOCK_INIT(zone, cpu);
1077}
1078
1079/*
1080 * Zone header dtor. This frees all data, destroys locks, frees the hash table
1081 * and removes the zone from the global list.
1082 *
1083 * Arguments/Returns follow uma_dtor specifications
1084 * udata unused
1085 */
1086
1087static void
1088zone_dtor(void *arg, int size, void *udata)
1089{
1090 uma_zone_t zone;
1091 int cpu;
1092
1093 zone = (uma_zone_t)arg;
1094
1095 mtx_lock(&uma_mtx);
1096 LIST_REMOVE(zone, uz_link);
1097 mtx_unlock(&uma_mtx);
1098
1099 ZONE_LOCK(zone);
1100 zone->uz_wssize = 0;
1101 ZONE_UNLOCK(zone);
1102
1103 zone_drain(zone);
1104 ZONE_LOCK(zone);
1105 if (zone->uz_free != 0)
1106 printf("Zone %s was not empty. Lost %d pages of memory.\n",
1107 zone->uz_name, zone->uz_pages);
1108
1109 if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0)
1110 for (cpu = 0; cpu < maxcpu; cpu++)
1111 CPU_LOCK_FINI(zone, cpu);
1112
1113 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0)
1114 hash_free(zone->uz_hash.uh_slab_hash,
1115 zone->uz_hash.uh_hashsize);
1116
1117 ZONE_UNLOCK(zone);
1118 ZONE_LOCK_FINI(zone);
1119}
1120/*
1121 * Traverses every zone in the system and calls a callback
1122 *
1123 * Arguments:
1124 * zfunc A pointer to a function which accepts a zone
1125 * as an argument.
1126 *
1127 * Returns:
1128 * Nothing
1129 */
1130static void
1131zone_foreach(void (*zfunc)(uma_zone_t))
1132{
1133 uma_zone_t zone;
1134
1135 mtx_lock(&uma_mtx);
1136 LIST_FOREACH(zone, &uma_zones, uz_link) {
1137 zfunc(zone);
1138 }
1139 mtx_unlock(&uma_mtx);
1140}
1141
1142/* Public functions */
1143/* See uma.h */
1144void
1145uma_startup(void *bootmem)
1146{
1147 struct uma_zctor_args args;
1148 uma_slab_t slab;
1149 int slabsize;
1150 int i;
1151
1152#ifdef UMA_DEBUG
1153 printf("Creating uma zone headers zone.\n");
1154#endif
1155#ifdef SMP
1156 maxcpu = mp_maxid + 1;
1157#else
1158 maxcpu = 1;
1159#endif
1160#ifdef UMA_DEBUG
1161 printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
1162 Debugger("stop");
1163#endif
1164 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1165 /* "Manually" create the initial zone */
1166 args.name = "UMA Zones";
1167 args.size = sizeof(struct uma_zone) +
1168 (sizeof(struct uma_cache) * (maxcpu - 1));
1169 args.ctor = zone_ctor;
1170 args.dtor = zone_dtor;
1171 args.uminit = zero_init;
1172 args.fini = NULL;
1173 args.align = 32 - 1;
1174 args.flags = UMA_ZONE_INTERNAL;
1175 /* The initial zone has no per cpu queues so it's smaller */
1176 zone_ctor(zones, sizeof(struct uma_zone), &args);
1177
1178#ifdef UMA_DEBUG
1179 printf("Filling boot free list.\n");
1180#endif
1181 for (i = 0; i < UMA_BOOT_PAGES; i++) {
1182 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1183 slab->us_data = (u_int8_t *)slab;
1184 slab->us_flags = UMA_SLAB_BOOT;
1185 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1186 uma_boot_free++;
1187 }
1188
1189#ifdef UMA_DEBUG
1190 printf("Creating slab zone.\n");
1191#endif
1192
1193 /*
1194 * This is the max number of free list items we'll have with
1195 * offpage slabs.
1196 */
1197
1198 slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
1199 slabsize /= UMA_MAX_WASTE;
1200 slabsize++; /* In case it was rounded down */
1201 slabsize += sizeof(struct uma_slab);
1202
1203 /* Now make a zone for slab headers */
1204 slabzone = uma_zcreate("UMA Slabs",
1205 slabsize,
1206 NULL, NULL, NULL, NULL,
1207 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1208
1209 hashzone = uma_zcreate("UMA Hash",
1210 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1211 NULL, NULL, NULL, NULL,
1212 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1213
1214 bucketzone = uma_zcreate("UMA Buckets", sizeof(struct uma_bucket),
1215 NULL, NULL, NULL, NULL,
1216 UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
1217
1218
1219#ifdef UMA_DEBUG
1220 printf("UMA startup complete.\n");
1221#endif
1222}
1223
1224/* see uma.h */
1225void
1226uma_startup2(void *hashmem, u_long elems)
1227{
1228 bzero(hashmem, elems * sizeof(void *));
1229 mallochash->uh_slab_hash = hashmem;
1230 mallochash->uh_hashsize = elems;
1231 mallochash->uh_hashmask = elems - 1;
1232 booted = 1;
1233 bucket_enable();
1234#ifdef UMA_DEBUG
1235 printf("UMA startup2 complete.\n");
1236#endif
1237}
1238
1239/*
1240 * Initialize our callout handle
1241 *
1242 */
1243
1244static void
1245uma_startup3(void)
1246{
1247#ifdef UMA_DEBUG
1248 printf("Starting callout.\n");
1249#endif
1250 callout_init(&uma_callout, 0);
1251 callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
1252#ifdef UMA_DEBUG
1253 printf("UMA startup3 complete.\n");
1254#endif
1255}
1256
1257/* See uma.h */
1258uma_zone_t
1259uma_zcreate(char *name, int size, uma_ctor ctor, uma_dtor dtor, uma_init uminit,
1260 uma_fini fini, int align, u_int16_t flags)
1261
1262{
1263 struct uma_zctor_args args;
1264
1265 /* This stuff is essential for the zone ctor */
1266 args.name = name;
1267 args.size = size;
1268 args.ctor = ctor;
1269 args.dtor = dtor;
1270 args.uminit = uminit;
1271 args.fini = fini;
1272 args.align = align;
1273 args.flags = flags;
1274
1275 return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL));
1276}
1277
1278/* See uma.h */
1279void
1280uma_zdestroy(uma_zone_t zone)
1281{
1282 uma_zfree_internal(zones, zone, NULL, 0);
1283}
1284
1285/* See uma.h */
1286void *
1287uma_zalloc_arg(uma_zone_t zone, void *udata, int wait)
1288{
1289 void *item;
1290 uma_cache_t cache;
1291 uma_bucket_t bucket;
1292 int cpu;
1293
1294 /* This is the fast path allocation */
1295#ifdef UMA_DEBUG_ALLOC_1
1296 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1297#endif
1298
1299zalloc_restart:
1300 cpu = PCPU_GET(cpuid);
1301 CPU_LOCK(zone, cpu);
1302 cache = &zone->uz_cpu[cpu];
1303
1304zalloc_start:
1305 bucket = cache->uc_allocbucket;
1306
1307 if (bucket) {
1308 if (bucket->ub_ptr > -1) {
1309 item = bucket->ub_bucket[bucket->ub_ptr];
1310#ifdef INVARIANTS
1311 bucket->ub_bucket[bucket->ub_ptr] = NULL;
1312#endif
1313 bucket->ub_ptr--;
1314 KASSERT(item != NULL,
1315 ("uma_zalloc: Bucket pointer mangled."));
1316 cache->uc_allocs++;
1317 CPU_UNLOCK(zone, cpu);
1318 if (zone->uz_ctor)
1319 zone->uz_ctor(item, zone->uz_size, udata);
1320 return (item);
1321 } else if (cache->uc_freebucket) {
1322 /*
1323 * We have run out of items in our allocbucket.
1324 * See if we can switch with our free bucket.
1325 */
1326 if (cache->uc_freebucket->ub_ptr > -1) {
1327 uma_bucket_t swap;
1328
1329#ifdef UMA_DEBUG_ALLOC
1330 printf("uma_zalloc: Swapping empty with alloc.\n");
1331#endif
1332 swap = cache->uc_freebucket;
1333 cache->uc_freebucket = cache->uc_allocbucket;
1334 cache->uc_allocbucket = swap;
1335
1336 goto zalloc_start;
1337 }
1338 }
1339 }
1340 ZONE_LOCK(zone);
1341 /* Since we have locked the zone we may as well send back our stats */
1342 zone->uz_allocs += cache->uc_allocs;
1343 cache->uc_allocs = 0;
1344
1345 /* Our old one is now a free bucket */
1346 if (cache->uc_allocbucket) {
1347 KASSERT(cache->uc_allocbucket->ub_ptr == -1,
1348 ("uma_zalloc_arg: Freeing a non free bucket."));
1349 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1350 cache->uc_allocbucket, ub_link);
1351 cache->uc_allocbucket = NULL;
1352 }
1353
1354 /* Check the free list for a new alloc bucket */
1355 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1356 KASSERT(bucket->ub_ptr != -1,
1357 ("uma_zalloc_arg: Returning an empty bucket."));
1358
1359 LIST_REMOVE(bucket, ub_link);
1360 cache->uc_allocbucket = bucket;
1361 ZONE_UNLOCK(zone);
1362 goto zalloc_start;
1363 }
1364 /* Bump up our uz_count so we get here less */
1365 if (zone->uz_count < UMA_BUCKET_SIZE - 1)
1366 zone->uz_count++;
1367
1368 /* We are no longer associated with this cpu!!! */
1369 CPU_UNLOCK(zone, cpu);
1370
1371 /*
1372 * Now let's just fill a bucket and put it on the free list. If that
1373 * works we'll restart the allocation from the beginning.
1374 *
1375 * Try this zone's free list first so we don't allocate extra buckets.
1376 */
1377
1378 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL)
1379 LIST_REMOVE(bucket, ub_link);
1380
1381 /* Now we no longer need the zone lock. */
1382 ZONE_UNLOCK(zone);
1383
1384 if (bucket == NULL)
1385 bucket = uma_zalloc_internal(bucketzone,
1386 NULL, wait, NULL);
1387
1388 if (bucket != NULL) {
1389#ifdef INVARIANTS
1390 bzero(bucket, bucketzone->uz_size);
1391#endif
1392 bucket->ub_ptr = -1;
1393
1394 if (uma_zalloc_internal(zone, udata, wait, bucket))
1395 goto zalloc_restart;
1396 else
1397 uma_zfree_internal(bucketzone, bucket, NULL, 0);
1398 }
1399 /*
1400 * We may not get a bucket if we recurse, so
1401 * return an actual item.
1402 */
1403#ifdef UMA_DEBUG
1404 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1405#endif
1406
1407 return (uma_zalloc_internal(zone, udata, wait, NULL));
1408}
1409
1410/*
1411 * Allocates an item for an internal zone OR fills a bucket
1412 *
1413 * Arguments
1414 * zone The zone to alloc for.
1415 * udata The data to be passed to the constructor.
1416 * wait M_WAITOK or M_NOWAIT.
1417 * bucket The bucket to fill or NULL
1418 *
1419 * Returns
1420 * NULL if there is no memory and M_NOWAIT is set
1421 * An item if called on an internal zone
1422 * Non NULL if called to fill a bucket and it was successful.
1423 *
1424 * Discussion:
1425 * This was much cleaner before it had to do per cpu caches. It is
1426 * complicated now because it has to handle the simple internal case, and
1427 * the more involved bucket filling and allocation.
1428 */
1429
1430static void *
1431uma_zalloc_internal(uma_zone_t zone, void *udata, int wait, uma_bucket_t bucket)
1432{
1433 uma_slab_t slab;
1434 u_int8_t freei;
1435 void *item;
1436
1437 item = NULL;
1438
1439 /*
1440 * This is to stop us from allocating per cpu buckets while we're
1441 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the
1442 * boot pages.
1443 */
1444
1445 if (bucketdisable && zone == bucketzone)
1446 return (NULL);
1447
1448#ifdef UMA_DEBUG_ALLOC
1449 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
1450#endif
1451 ZONE_LOCK(zone);
1452
1453 /*
1454 * This code is here to limit the number of simultaneous bucket fills
1455 * for any given zone to the number of per cpu caches in this zone. This
1456 * is done so that we don't allocate more memory than we really need.
1457 */
1458
1459 if (bucket) {
1460#ifdef SMP
1461 if (zone->uz_fills >= mp_ncpus) {
1462#else
1463 if (zone->uz_fills > 1) {
1464#endif
1465 ZONE_UNLOCK(zone);
1466 return (NULL);
1467 }
1468
1469 zone->uz_fills++;
1470 }
1471
1472new_slab:
1473
1474 /* Find a slab with some space */
1475 if (zone->uz_free) {
1476 if (!LIST_EMPTY(&zone->uz_part_slab)) {
1477 slab = LIST_FIRST(&zone->uz_part_slab);
1478 } else {
1479 slab = LIST_FIRST(&zone->uz_free_slab);
1480 LIST_REMOVE(slab, us_link);
1481 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1482 }
1483 } else {
1484 /*
1485 * This is to prevent us from recursively trying to allocate
1486 * buckets. The problem is that if an allocation forces us to
1487 * grab a new bucket we will call page_alloc, which will go off
1488 * and cause the vm to allocate vm_map_entries. If we need new
1489 * buckets there too we will recurse in kmem_alloc and bad
1490 * things happen. So instead we return a NULL bucket, and make
1491 * the code that allocates buckets smart enough to deal with it */
1492 if (zone == bucketzone && zone->uz_recurse != 0) {
1493 ZONE_UNLOCK(zone);
1494 return (NULL);
1495 }
1496 while (zone->uz_maxpages &&
1497 zone->uz_pages >= zone->uz_maxpages) {
1498 zone->uz_flags |= UMA_ZFLAG_FULL;
1499
1500 if (wait & M_WAITOK)
1501 msleep(zone, &zone->uz_lock, PVM, "zonelimit", 0);
1502 else
1503 goto alloc_fail;
1504
1505 goto new_slab;
1506 }
1507
1508 zone->uz_recurse++;
1509 slab = slab_zalloc(zone, wait);
1510 zone->uz_recurse--;
1511 /*
1512 * We might not have been able to get a slab but another cpu
1513 * could have while we were unlocked. If we did get a slab put
1514 * it on the partially used slab list. If not check the free
1515 * count and restart or fail accordingly.
1516 */
1517 if (slab)
1518 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1519 else if (zone->uz_free == 0)
1520 goto alloc_fail;
1521 else
1522 goto new_slab;
1523 }
1524 /*
1525 * If this is our first time through, put this bucket on the list.
1526 */
1527 if (bucket != NULL && bucket->ub_ptr == -1)
1528 LIST_INSERT_HEAD(&zone->uz_full_bucket,
1529 bucket, ub_link);
1530
1531
1532 while (slab->us_freecount) {
1533 freei = slab->us_firstfree;
1534 slab->us_firstfree = slab->us_freelist[freei];
1535#ifdef INVARIANTS
1536 slab->us_freelist[freei] = 255;
1537#endif
1538 slab->us_freecount--;
1539 zone->uz_free--;
1540 item = slab->us_data + (zone->uz_rsize * freei);
1541
1542 if (bucket == NULL) {
1543 zone->uz_allocs++;
1544 break;
1545 }
1546 bucket->ub_bucket[++bucket->ub_ptr] = item;
1547
1548 /* Don't overfill the bucket! */
1549 if (bucket->ub_ptr == zone->uz_count)
1550 break;
1551 }
1552
1553 /* Move this slab to the full list */
1554 if (slab->us_freecount == 0) {
1555 LIST_REMOVE(slab, us_link);
1556 LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
1557 }
1558
1559 if (bucket != NULL) {
1560 /* Try to keep the buckets totally full, but don't block */
1561 if (bucket->ub_ptr < zone->uz_count) {
1562 wait = M_NOWAIT;
1563 goto new_slab;
1564 } else
1565 zone->uz_fills--;
1566 }
1567
1568 ZONE_UNLOCK(zone);
1569
1570 /* Only construct at this time if we're not filling a bucket */
1571 if (bucket == NULL && zone->uz_ctor != NULL)
1572 zone->uz_ctor(item, zone->uz_size, udata);
1573
1574 return (item);
1575
1576alloc_fail:
1577 if (bucket != NULL)
1578 zone->uz_fills--;
1579 ZONE_UNLOCK(zone);
1580
1581 if (bucket != NULL && bucket->ub_ptr != -1)
1582 return (bucket);
1583
1584 return (NULL);
1585}
1586
1587/* See uma.h */
1588void
1589uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
1590{
1591 uma_cache_t cache;
1592 uma_bucket_t bucket;
1593 int cpu;
1594
1595 /* This is the fast path free */
1596#ifdef UMA_DEBUG_ALLOC_1
1597 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
1598#endif
1599 /*
1600 * The race here is acceptable. If we miss it we'll just have to wait
1601 * a little longer for the limits to be reset.
1602 */
1603
1604 if (zone->uz_flags & UMA_ZFLAG_FULL)
1605 goto zfree_internal;
1606
1607zfree_restart:
1608 cpu = PCPU_GET(cpuid);
1609 CPU_LOCK(zone, cpu);
1610 cache = &zone->uz_cpu[cpu];
1611
1612zfree_start:
1613 bucket = cache->uc_freebucket;
1614
1615 if (bucket) {
1616 /*
1617 * Do we have room in our bucket? It is OK for this uz count
1618 * check to be slightly out of sync.
1619 */
1620
1621 if (bucket->ub_ptr < zone->uz_count) {
1622 bucket->ub_ptr++;
1623 KASSERT(bucket->ub_bucket[bucket->ub_ptr] == NULL,
1624 ("uma_zfree: Freeing to non free bucket index."));
1625 bucket->ub_bucket[bucket->ub_ptr] = item;
1626 if (zone->uz_dtor)
1627 zone->uz_dtor(item, zone->uz_size, udata);
1628 CPU_UNLOCK(zone, cpu);
1629 return;
1630 } else if (cache->uc_allocbucket) {
1631#ifdef UMA_DEBUG_ALLOC
1632 printf("uma_zfree: Swapping buckets.\n");
1633#endif
1634 /*
1635 * We have run out of space in our freebucket.
1636 * See if we can switch with our alloc bucket.
1637 */
1638 if (cache->uc_allocbucket->ub_ptr <
1639 cache->uc_freebucket->ub_ptr) {
1640 uma_bucket_t swap;
1641
1642 swap = cache->uc_freebucket;
1643 cache->uc_freebucket = cache->uc_allocbucket;
1644 cache->uc_allocbucket = swap;
1645
1646 goto zfree_start;
1647 }
1648 }
1649 }
1650
1651 /*
1652 * We can get here for two reasons:
1653 *
1654 * 1) The buckets are NULL
1655 * 2) The alloc and free buckets are both somewhat full.
1656 *
1657 */
1658
1659 ZONE_LOCK(zone);
1660
1661 bucket = cache->uc_freebucket;
1662 cache->uc_freebucket = NULL;
1663
1664 /* Can we throw this on the zone full list? */
1665 if (bucket != NULL) {
1666#ifdef UMA_DEBUG_ALLOC
1667 printf("uma_zfree: Putting old bucket on the free list.\n");
1668#endif
1669 /* ub_ptr is pointing to the last free item */
1670 KASSERT(bucket->ub_ptr != -1,
1671 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
1672 LIST_INSERT_HEAD(&zone->uz_full_bucket,
1673 bucket, ub_link);
1674 }
1675 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1676 LIST_REMOVE(bucket, ub_link);
1677 ZONE_UNLOCK(zone);
1678 cache->uc_freebucket = bucket;
1679 goto zfree_start;
1680 }
1681 /* We're done with this CPU now */
1682 CPU_UNLOCK(zone, cpu);
1683
1684 /* And the zone.. */
1685 ZONE_UNLOCK(zone);
1686
1687#ifdef UMA_DEBUG_ALLOC
1688 printf("uma_zfree: Allocating new free bucket.\n");
1689#endif
1690 bucket = uma_zalloc_internal(bucketzone,
1691 NULL, M_NOWAIT, NULL);
1692 if (bucket) {
1693#ifdef INVARIANTS
1694 bzero(bucket, bucketzone->uz_size);
1695#endif
1696 bucket->ub_ptr = -1;
1697 ZONE_LOCK(zone);
1698 LIST_INSERT_HEAD(&zone->uz_free_bucket,
1699 bucket, ub_link);
1700 ZONE_UNLOCK(zone);
1701 goto zfree_restart;
1702 }
1703
1704 /*
1705 * If nothing else caught this, we'll just do an internal free.
1706 */
1707
1708zfree_internal:
1709
1710 uma_zfree_internal(zone, item, udata, 0);
1711
1712 return;
1713
1714}
1715
1716/*
1717 * Frees an item to an INTERNAL zone or allocates a free bucket
1718 *
1719 * Arguments:
1720 * zone The zone to free to
1721 * item The item we're freeing
1722 * udata User supplied data for the dtor
1723 * skip Skip the dtor, it was done in uma_zfree_arg
1724 */
1725
1726static void
1727uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
1728{
1729 uma_slab_t slab;
1730 u_int8_t *mem;
1731 u_int8_t freei;
1732
1733 ZONE_LOCK(zone);
1734
1735 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
1736 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
1737 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
1738 slab = hash_sfind(&zone->uz_hash, mem);
1739 else {
1740 mem += zone->uz_pgoff;
1741 slab = (uma_slab_t)mem;
1742 }
1743 } else {
1744 slab = (uma_slab_t)udata;
1745 }
1746
1747 /* Do we need to remove from any lists? */
1748 if (slab->us_freecount+1 == zone->uz_ipers) {
1749 LIST_REMOVE(slab, us_link);
1750 LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
1751 } else if (slab->us_freecount == 0) {
1752 LIST_REMOVE(slab, us_link);
1753 LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1754 }
1755
1756 /* Slab management stuff */
1757 freei = ((unsigned long)item - (unsigned long)slab->us_data)
1758 / zone->uz_rsize;
1759#ifdef INVARIANTS
1760 if (((freei * zone->uz_rsize) + slab->us_data) != item)
1761 panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
1762 zone->uz_name, zone, slab, item);
1763 if (freei >= zone->uz_ipers)
1764 panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
1765 zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
1766
1767 if (slab->us_freelist[freei] != 255) {
1768 printf("Slab at %p, freei %d = %d.\n",
1769 slab, freei, slab->us_freelist[freei]);
1770 panic("Duplicate free of item %p from zone %p(%s)\n",
1771 item, zone, zone->uz_name);
1772 }
1773#endif
1774 slab->us_freelist[freei] = slab->us_firstfree;
1775 slab->us_firstfree = freei;
1776 slab->us_freecount++;
1777
1778 /* Zone statistics */
1779 zone->uz_free++;
1780
1781 if (!skip && zone->uz_dtor)
1782 zone->uz_dtor(item, zone->uz_size, udata);
1783
1784 if (zone->uz_flags & UMA_ZFLAG_FULL) {
1785 if (zone->uz_pages < zone->uz_maxpages)
1786 zone->uz_flags &= ~UMA_ZFLAG_FULL;
1787
1788 /* We can handle one more allocation */
1789 wakeup_one(zone);
1790 }
1791
1792 ZONE_UNLOCK(zone);
1793}
1794
1795/* See uma.h */
1796void
1797uma_zone_set_max(uma_zone_t zone, int nitems)
1798{
1799 ZONE_LOCK(zone);
1800 if (zone->uz_ppera > 1)
1801 zone->uz_maxpages = nitems * zone->uz_ppera;
1802 else
1803 zone->uz_maxpages = nitems / zone->uz_ipers;
1804 if (zone->uz_maxpages * zone->uz_ipers < nitems)
1805 zone->uz_maxpages++;
1806 ZONE_UNLOCK(zone);
1807}
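/*
 * Illustrative arithmetic for the limit above, assuming a single page zone
 * (uz_ppera == 1) with uz_ipers == 38: uma_zone_set_max(zone, 100) yields
 * uz_maxpages = 100 / 38 = 2, and since 2 * 38 = 76 < 100 it is bumped to
 * 3, i.e. the limit is rounded up to a whole number of slabs (114 items).
 * For a multi-page zone (uz_ppera > 1) the limit is nitems * uz_ppera pages.
 */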
1808
1809/* See uma.h */
1810void
1811uma_zone_set_freef(uma_zone_t zone, uma_free freef)
1812{
1813 ZONE_LOCK(zone);
1814
1815 zone->uz_freef = freef;
1816
1817 ZONE_UNLOCK(zone);
1818}
1819
1820/* See uma.h */
1821void
1822uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
1823{
1824 ZONE_LOCK(zone);
1825
1826 zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
1827 zone->uz_allocf = allocf;
1828
1829 ZONE_UNLOCK(zone);
1830}
1831
1832/* See uma.h */
1833int
1834uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
1835{
1836 int pages;
1837 vm_offset_t kva;
1838
1839 mtx_lock(&Giant);
1840
1841 pages = count / zone->uz_ipers;
1842
1843 if (pages * zone->uz_ipers < count)
1844 pages++;
1845
1846 kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
1847
1848 if (kva == 0) {
1849 mtx_unlock(&Giant);
1850 return (0);
1851 }
1852
1853
1854 if (obj == NULL)
1855 obj = vm_object_allocate(OBJT_DEFAULT,
1856 zone->uz_maxpages);
1857 else
1858 _vm_object_allocate(OBJT_DEFAULT,
1859 zone->uz_maxpages, obj);
1860
1861 ZONE_LOCK(zone);
1862 zone->uz_kva = kva;
1863 zone->uz_obj = obj;
1864 zone->uz_maxpages = pages;
1865
1866 zone->uz_allocf = obj_alloc;
1867 zone->uz_flags |= UMA_ZFLAG_NOFREE | UMA_ZFLAG_PRIVALLOC;
1868
1869 ZONE_UNLOCK(zone);
1870 mtx_unlock(&Giant);
1871
1872 return (1);
1873}
1874
1875/* See uma.h */
1876void
1877uma_prealloc(uma_zone_t zone, int items)
1878{
1879 int slabs;
1880 uma_slab_t slab;
1881
1882 ZONE_LOCK(zone);
1883 slabs = items / zone->uz_ipers;
1884 if (slabs * zone->uz_ipers < items)
1885 slabs++;
1886
1887 while (slabs > 0) {
1888 slab = slab_zalloc(zone, M_WAITOK);
1889 LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
1890 slabs--;
1891 }
1892 ZONE_UNLOCK(zone);
1893}
1894
1895/* See uma.h */
1896void
1897uma_reclaim(void)
1898{
1899 /*
1900 * You might think that the delay below would improve performance since
1901 * the allocator will give away memory that it may ask for immediately.
1902 * Really, it makes things worse, since cpu cycles are so much cheaper
1903 * than disk activity.
1904 */
1905#if 0
1906 static struct timeval tv = {0};
1907 struct timeval now;
1908 getmicrouptime(&now);
1909 if (now.tv_sec > tv.tv_sec + 30)
1910 tv = now;
1911 else
1912 return;
1913#endif
1914#ifdef UMA_DEBUG
1915 printf("UMA: vm asked us to release pages!\n");
1916#endif
1917 bucket_enable();
1918 zone_foreach(zone_drain);
1919
1920 /*
1921 * Some slabs may have been freed, but this zone was visited early in
1922 * the pass, so we visit it again to free pages that become empty once
1923 * the other zones are drained. We have to do the same for buckets.
1924 */
1925 zone_drain(slabzone);
1926 zone_drain(bucketzone);
1927}
1928
1929void *
1930uma_large_malloc(int size, int wait)
1931{
1932 void *mem;
1933 uma_slab_t slab;
1934 u_int8_t flags;
1935
1936 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
1937 if (slab == NULL)
1938 return (NULL);
1939
1940 mem = page_alloc(NULL, size, &flags, wait);
1941 if (mem) {
1942 slab->us_data = mem;
1943 slab->us_flags = flags | UMA_SLAB_MALLOC;
1944 slab->us_size = size;
1945 UMA_HASH_INSERT(mallochash, slab, mem);
1946 } else {
1947 uma_zfree_internal(slabzone, slab, NULL, 0);
1948 }
1949
1950
1951 return (mem);
1952}
1953
1954void
1955uma_large_free(uma_slab_t slab)
1956{
1957 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
1958 page_free(slab->us_data, slab->us_size, slab->us_flags);
1959 uma_zfree_internal(slabzone, slab, NULL, 0);
1960}
1961
1962void
1963uma_print_stats(void)
1964{
1965 zone_foreach(uma_print_zone);
1966}
1967
1968void
1969uma_print_zone(uma_zone_t zone)
1970{
1971 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
1972 zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
1973 zone->uz_ipers, zone->uz_ppera,
1974 (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free);
1975}
1976
1977/*
1978 * Sysctl handler for vm.zone
1979 *
1980 * stolen from vm_zone.c
1981 */
1982static int
1983sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
1984{
1985 int error, len, cnt;
1986 const int linesize = 128; /* conservative */
1987 int totalfree;
1988 char *tmpbuf, *offset;
1989 uma_zone_t z;
1990 char *p;
1991
1992 cnt = 0;
1993 mtx_lock(&uma_mtx);
1994 LIST_FOREACH(z, &uma_zones, uz_link)
1995 cnt++;
1996 mtx_unlock(&uma_mtx);
1997 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
1998 M_TEMP, M_WAITOK);
1999 len = snprintf(tmpbuf, linesize,
2000 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
2001 if (cnt == 0)
2002 tmpbuf[len - 1] = '\0';
2003 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2004 if (error || cnt == 0)
2005 goto out;
2006 offset = tmpbuf;
2007 mtx_lock(&uma_mtx);
2008 LIST_FOREACH(z, &uma_zones, uz_link) {
2009 if (cnt == 0) /* list may have changed size */
2010 break;
2011 ZONE_LOCK(z);
2012 totalfree = z->uz_free + z->uz_cachefree;
2013 len = snprintf(offset, linesize,
2014 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2015 z->uz_name, z->uz_size,
2016 z->uz_maxpages * z->uz_ipers,
2017 (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
2018 totalfree,
2019 (unsigned long long)z->uz_allocs);
2020 ZONE_UNLOCK(z);
2021 for (p = offset + 12; p > offset && *p == ' '; --p)
2022 /* nothing */ ;
2023 p[1] = ':';
2024 cnt--;
2025 offset += len;
2026 }
2027 mtx_unlock(&uma_mtx);
2028 *offset++ = '\0';
2029 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2030out:
2031 FREE(tmpbuf, M_TEMP);
2032 return (error);
2033}