Deleted Added
sdiff udiff text old ( 251709 ) new ( 251826 )
full compact
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 34 unchanged lines hidden (view full) ---

43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 251709 2013-06-13 21:05:38Z jeff $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59

--- 66 unchanged lines hidden (view full) ---

126 * Are we allowed to allocate buckets?
127 */
128static int bucketdisable = 1;
129
130/* Linked list of all kegs in the system */
131static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
132
133/* This mutex protects the keg list */
134static struct mtx uma_mtx;
135
136/* Linked list of boot time pages */
137static LIST_HEAD(,uma_slab) uma_boot_pages =
138 LIST_HEAD_INITIALIZER(uma_boot_pages);
139
140/* This mutex protects the boot time pages list */
141static struct mtx uma_boot_pages_mtx;
142
143/* Is the VM done starting up? */
144static int booted = 0;
145#define UMA_STARTUP 1
146#define UMA_STARTUP2 2
147
148/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
149static const u_int uma_max_ipers = SLAB_SETSIZE;

--- 17 unchanged lines hidden (view full) ---

167 */
168struct uma_zctor_args {
169 const char *name;
170 size_t size;
171 uma_ctor ctor;
172 uma_dtor dtor;
173 uma_init uminit;
174 uma_fini fini;
175 uma_keg_t keg;
176 int align;
177 uint32_t flags;
178};
179
180struct uma_kctor_args {
181 uma_zone_t zone;
182 size_t size;

--- 28 unchanged lines hidden (view full) ---

211 */
212static uint8_t bucket_size[BUCKET_ZONES];
213
214/*
215 * Flags and enumerations to be passed to internal functions.
216 */
217enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
218
219#define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
220#define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
221
222/* Prototypes.. */
223
224static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
225static void *page_alloc(uma_zone_t, int, uint8_t *, int);
226static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
227static void page_free(void *, int, uint8_t);
228static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
229static void cache_drain(uma_zone_t);

--- 9 unchanged lines hidden (view full) ---

239static void zone_foreach(void (*zfunc)(uma_zone_t));
240static void zone_timeout(uma_zone_t zone);
241static int hash_alloc(struct uma_hash *);
242static int hash_expand(struct uma_hash *, struct uma_hash *);
243static void hash_free(struct uma_hash *hash);
244static void uma_timeout(void *);
245static void uma_startup3(void);
246static void *zone_alloc_item(uma_zone_t, void *, int);
247static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
248 int);
249static void bucket_enable(void);
250static void bucket_init(void);
251static uma_bucket_t bucket_alloc(int, int);
252static void bucket_free(uma_bucket_t);
253static void bucket_zone_drain(void);
254static int zone_alloc_bucket(uma_zone_t zone, int flags);
255static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
256static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
257static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
258static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
259 uma_fini fini, int align, uint32_t flags);
260static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
261static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
262
263void uma_print_zone(uma_zone_t);
264void uma_print_stats(void);
265static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
266static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
267
268SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
269

--- 88 unchanged lines hidden (view full) ---

358}
359
360static void
361bucket_free(uma_bucket_t bucket)
362{
363 struct uma_bucket_zone *ubz;
364
365 ubz = bucket_zone_lookup(bucket->ub_entries);
366 zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
367 ZFREE_STATFREE);
368}
369
370static void
371bucket_zone_drain(void)
372{
373 struct uma_bucket_zone *ubz;
374
375 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)

--- 7 unchanged lines hidden (view full) ---

383
384 if (!zone_warnings || zone->uz_warning == NULL)
385 return;
386
387 if (ratecheck(&zone->uz_ratecheck, &warninterval))
388 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
389}
390
391static inline uma_keg_t
392zone_first_keg(uma_zone_t zone)
393{
394
395 return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
396}
397
398static void
399zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
400{
401 uma_klink_t klink;
402
403 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
404 kegfn(klink->kl_keg);
405}

--- 168 unchanged lines hidden (view full) ---

574 * Nothing
575 */
576static void
577hash_free(struct uma_hash *hash)
578{
579 if (hash->uh_slab_hash == NULL)
580 return;
581 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
582 zone_free_item(hashzone,
583 hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
584 else
585 free(hash->uh_slab_hash, M_UMAHASH);
586}
587
/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone	The zone to free to, must be unlocked.
 *	bucket	The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	void *item;

	/* Callers may legitimately pass a NULL bucket. */
	if (bucket == NULL)
		return;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		/* Poison the slot so stale reuse trips the assertion below. */
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		/*
		 * SKIP_DTOR: the dtor presumably already ran when the item
		 * entered the per-CPU cache — confirm against uma_zfree_arg.
		 */
		zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
	}
}
618
619/*
620 * Drains the per cpu caches for a zone.
621 *
622 * NOTE: This may only be called while the zone is being turn down, and not
623 * during normal operation. This is necessary in order that we do not have
624 * to migrate CPUs to drain the per-CPU caches.

--- 137 unchanged lines hidden (view full) ---

762 obj = kernel_object;
763 else
764 obj = NULL;
765 for (i = 0; i < keg->uk_ppera; i++)
766 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
767 obj);
768 }
769 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
770 zone_free_item(keg->uk_slabzone, slab, NULL,
771 SKIP_NONE, ZFREE_STATFREE);
772#ifdef UMA_DEBUG
773 printf("%s: Returning %d bytes.\n",
774 keg->uk_name, PAGE_SIZE * keg->uk_ppera);
775#endif
776 keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
777 }
778}
779

--- 57 unchanged lines hidden (view full) ---

837 uint8_t *mem;
838 uint8_t flags;
839 int i;
840
841 mtx_assert(&keg->uk_lock, MA_OWNED);
842 slab = NULL;
843
844#ifdef UMA_DEBUG
845 printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
846#endif
847 allocf = keg->uk_allocf;
848 KEG_UNLOCK(keg);
849
850 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
851 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
852 if (slab == NULL) {
853 KEG_LOCK(keg);

--- 15 unchanged lines hidden (view full) ---

869
870 if (keg->uk_flags & UMA_ZONE_NODUMP)
871 wait |= M_NODUMP;
872
873 /* zone is passed for legacy reasons. */
874 mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
875 if (mem == NULL) {
876 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
877 zone_free_item(keg->uk_slabzone, slab, NULL,
878 SKIP_NONE, ZFREE_STATFREE);
879 KEG_LOCK(keg);
880 return (NULL);
881 }
882
883 /* Point the slab into the allocated memory */
884 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
885 slab = (uma_slab_t )(mem + keg->uk_pgoff);
886

--- 37 unchanged lines hidden (view full) ---

924 else
925 obj = NULL;
926 for (i = 0; i < keg->uk_ppera; i++)
927 vsetobj((vm_offset_t)mem +
928 (i * PAGE_SIZE), obj);
929 }
930 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
931 zone_free_item(keg->uk_slabzone, slab,
932 NULL, SKIP_NONE, ZFREE_STATFREE);
933 keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera,
934 flags);
935 KEG_LOCK(keg);
936 return (NULL);
937 }
938 }
939 KEG_LOCK(keg);
940

--- 537 unchanged lines hidden (view full) ---

1478 zone->uz_fails = 0;
1479 zone->uz_sleeps = 0;
1480 zone->uz_fills = zone->uz_count = 0;
1481 zone->uz_flags = 0;
1482 zone->uz_warning = NULL;
1483 timevalclear(&zone->uz_ratecheck);
1484 keg = arg->keg;
1485
1486 if (arg->flags & UMA_ZONE_SECONDARY) {
1487 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1488 zone->uz_init = arg->uminit;
1489 zone->uz_fini = arg->fini;
1490 zone->uz_lock = &keg->uk_lock;
1491 zone->uz_flags |= UMA_ZONE_SECONDARY;
1492 mtx_lock(&uma_mtx);
1493 ZONE_LOCK(zone);

--- 20 unchanged lines hidden (view full) ---

1514 karg.align = arg->align;
1515 karg.flags = arg->flags;
1516 karg.zone = zone;
1517 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1518 flags);
1519 if (error)
1520 return (error);
1521 }
1522 /*
1523 * Link in the first keg.
1524 */
1525 zone->uz_klink.kl_keg = keg;
1526 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1527 zone->uz_lock = &keg->uk_lock;
1528 zone->uz_size = keg->uk_size;
1529 zone->uz_flags |= (keg->uk_flags &

--- 81 unchanged lines hidden (view full) ---

1611 LIST_REMOVE(klink, kl_link);
1612 if (klink == &zone->uz_klink)
1613 continue;
1614 free(klink, M_TEMP);
1615 }
1616 /*
1617 * We only destroy kegs from non secondary zones.
1618 */
1619 if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1620 mtx_lock(&uma_mtx);
1621 LIST_REMOVE(keg, uk_link);
1622 mtx_unlock(&uma_mtx);
1623 zone_free_item(kegs, keg, NULL, SKIP_NONE,
1624 ZFREE_STATFREE);
1625 }
1626}
1627
1628/*
1629 * Traverses every zone in the system and calls a callback
1630 *
1631 * Arguments:
1632 * zfunc A pointer to a function which accepts a zone

--- 27 unchanged lines hidden (view full) ---

1660 int i;
1661
1662#ifdef UMA_DEBUG
1663 printf("Creating uma keg headers zone and keg.\n");
1664#endif
1665 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1666
1667 /* "manually" create the initial zone */
1668 args.name = "UMA Kegs";
1669 args.size = sizeof(struct uma_keg);
1670 args.ctor = keg_ctor;
1671 args.dtor = keg_dtor;
1672 args.uminit = zero_init;
1673 args.fini = NULL;
1674 args.keg = &masterkeg;
1675 args.align = 32 - 1;

--- 124 unchanged lines hidden (view full) ---

1800uma_zone_t
1801uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1802 uma_init uminit, uma_fini fini, int align, uint32_t flags)
1803
1804{
1805 struct uma_zctor_args args;
1806
1807 /* This stuff is essential for the zone ctor */
1808 args.name = name;
1809 args.size = size;
1810 args.ctor = ctor;
1811 args.dtor = dtor;
1812 args.uminit = uminit;
1813 args.fini = fini;
1814 args.align = align;
1815 args.flags = flags;

--- 6 unchanged lines hidden (view full) ---

1822uma_zone_t
1823uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1824 uma_init zinit, uma_fini zfini, uma_zone_t master)
1825{
1826 struct uma_zctor_args args;
1827 uma_keg_t keg;
1828
1829 keg = zone_first_keg(master);
1830 args.name = name;
1831 args.size = keg->uk_size;
1832 args.ctor = ctor;
1833 args.dtor = dtor;
1834 args.uminit = zinit;
1835 args.fini = zfini;
1836 args.align = keg->uk_align;
1837 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1838 args.keg = keg;
1839
1840 /* XXX Attaches only one keg of potentially many. */
1841 return (zone_alloc_item(zones, &args, M_WAITOK));
1842}
1843
1844static void
1845zone_lock_pair(uma_zone_t a, uma_zone_t b)
1846{
1847 if (a < b) {
1848 ZONE_LOCK(a);
1849 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1850 } else {
1851 ZONE_LOCK(b);

--- 75 unchanged lines hidden (view full) ---

1927}
1928
1929
/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

	/*
	 * Tear the zone down by freeing it back to the internal 'zones'
	 * zone; teardown work happens on that zone's destructor path.
	 */
	zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
}
1937
1938/* See uma.h */
1939void *
1940uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1941{
1942 void *item;
1943 uma_cache_t cache;

--- 62 unchanged lines hidden (view full) ---

2006#endif
2007 KASSERT(item != NULL,
2008 ("uma_zalloc: Bucket pointer mangled."));
2009 cache->uc_allocs++;
2010 critical_exit();
2011 if (zone->uz_ctor != NULL) {
2012 if (zone->uz_ctor(item, zone->uz_size,
2013 udata, flags) != 0) {
2014 zone_free_item(zone, item, udata,
2015 SKIP_DTOR, ZFREE_STATFAIL |
2016 ZFREE_STATFREE);
2017 return (NULL);
2018 }
2019 }
2020#ifdef INVARIANTS
2021 uma_dbg_alloc(zone, NULL, item);
2022#endif
2023 if (flags & M_ZERO)
2024 bzero(item, zone->uz_size);

--- 39 unchanged lines hidden (view full) ---

2064 bucket = cache->uc_freebucket;
2065 if (bucket != NULL && bucket->ub_cnt > 0) {
2066 ZONE_UNLOCK(zone);
2067 goto zalloc_start;
2068 }
2069 }
2070
2071 /* Since we have locked the zone we may as well send back our stats */
2072 zone->uz_allocs += cache->uc_allocs;
2073 cache->uc_allocs = 0;
2074 zone->uz_frees += cache->uc_frees;
2075 cache->uc_frees = 0;
2076
2077 /* Our old one is now a free bucket */
2078 if (cache->uc_allocbucket) {
2079 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
2080 ("uma_zalloc_arg: Freeing a non free bucket."));
2081 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2082 cache->uc_allocbucket, ub_link);

--- 231 unchanged lines hidden (view full) ---

2314 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2315 continue;
2316 }
2317 }
2318 return (NULL);
2319}
2320
/*
 * Allocate one item from the given slab.  The slab's keg must be locked
 * and the slab must have at least one free item.
 */
static void *
slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
{
	uma_keg_t keg;
	void *item;
	uint8_t freei;

	keg = slab->us_keg;
	mtx_assert(&keg->uk_lock, MA_OWNED);

	/* BIT_FFS() is 1-based, hence the -1 to get a free-slot index. */
	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
	item = slab->us_data + (keg->uk_rsize * freei);
	slab->us_freecount--;
	keg->uk_free--;

	/* Move this slab to the full list */
	if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
	}

	return (item);
}
2345
/*
 * Fill a bucket with items from the zone's keg(s) and queue it on the
 * zone's full-bucket list.
 *
 * Arguments:
 *	zone	The zone to fill a bucket for; must be locked on entry.
 *	flags	M_* wait/zero flags from the original allocation request.
 *
 * Returns:
 *	1 if a non-empty bucket was queued on uz_full_bucket, 0 otherwise.
 */
static int
zone_alloc_bucket(uma_zone_t zone, int flags)
{
	uma_bucket_t bucket;
	uma_slab_t slab;
	uma_keg_t keg;
	int16_t saved;
	int max, origflags = flags;

	/*
	 * Try this zone's free list first so we don't allocate extra buckets.
	 */
	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		KASSERT(bucket->ub_cnt == 0,
		    ("zone_alloc_bucket: Bucket on free list is not empty."));
		LIST_REMOVE(bucket, ub_link);
	} else {
		int bflags;

		/* The bucket structure itself never needs zeroing. */
		bflags = (flags & ~M_ZERO);
		if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
			bflags |= M_NOVM;

		/* Drop the zone lock around the bucket allocation. */
		ZONE_UNLOCK(zone);
		bucket = bucket_alloc(zone->uz_count, bflags);
		ZONE_LOCK(zone);
	}

	if (bucket == NULL) {
		return (0);
	}

#ifdef SMP
	/*
	 * This code is here to limit the number of simultaneous bucket fills
	 * for any given zone to the number of per cpu caches in this zone. This
	 * is done so that we don't allocate more memory than we really need.
	 */
	if (zone->uz_fills >= mp_ncpus)
		goto done;

#endif
	zone->uz_fills++;

	max = MIN(bucket->ub_entries, zone->uz_count);
	/* Try to keep the buckets totally full */
	saved = bucket->ub_cnt;
	slab = NULL;
	keg = NULL;
	while (bucket->ub_cnt < max &&
	    (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
		keg = slab->us_keg;
		while (slab->us_freecount && bucket->ub_cnt < max) {
			bucket->ub_bucket[bucket->ub_cnt++] =
			    slab_alloc_item(zone, slab);
		}

		/* Don't block on the next fill */
		flags |= M_NOWAIT;
	}
	/*
	 * uz_slab() left us holding the keg lock (slab_alloc_item asserts
	 * it); trade it back for the zone lock.
	 */
	if (slab)
		zone_relock(zone, keg);

	/*
	 * We unlock here because we need to call the zone's init.
	 * It should be safe to unlock because the slab dealt with
	 * above is already on the appropriate list within the keg
	 * and the bucket we filled is not yet on any list, so we
	 * own it.
	 */
	if (zone->uz_init != NULL) {
		int i;

		ZONE_UNLOCK(zone);
		/* Init only the items this call added (index >= saved). */
		for (i = saved; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
			    origflags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			int j;

			for (j = i; j < bucket->ub_cnt; j++) {
				/* SKIP_FINI: init never ran on these items. */
				zone_free_item(zone, bucket->ub_bucket[j],
				    NULL, SKIP_FINI, 0);
#ifdef INVARIANTS
				bucket->ub_bucket[j] = NULL;
#endif
			}
			bucket->ub_cnt = i;
		}
		ZONE_LOCK(zone);
	}

	zone->uz_fills--;
	if (bucket->ub_cnt != 0) {
		LIST_INSERT_HEAD(&zone->uz_full_bucket,
		    bucket, ub_link);
		return (1);
	}
#ifdef SMP
done:
#endif
	/* Nothing was allocated; give the empty bucket back. */
	bucket_free(bucket);

	return (0);
}
/*
 * Allocates an item for an internal zone
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */

static void *
zone_alloc_item(uma_zone_t zone, void *udata, int flags)
{
	uma_slab_t slab;
	void *item;

	item = NULL;

#ifdef UMA_DEBUG_ALLOC
	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	ZONE_LOCK(zone);

	slab = zone->uz_slab(zone, NULL, flags);
	if (slab == NULL) {
		zone->uz_fails++;
		ZONE_UNLOCK(zone);
		return (NULL);
	}

	item = slab_alloc_item(zone, slab);

	/* uz_slab() returned with the keg locked; swap back to the zone. */
	zone_relock(zone, slab->us_keg);
	zone->uz_allocs++;
	ZONE_UNLOCK(zone);

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor. This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
			/* Init failed: free without running fini or dtor. */
			zone_free_item(zone, item, udata, SKIP_FINI,
			    ZFREE_STATFAIL | ZFREE_STATFREE);
			return (NULL);
		}
	}
	if (zone->uz_ctor != NULL) {
		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			/* Ctor failed: skip the dtor but still run fini. */
			zone_free_item(zone, item, udata, SKIP_DTOR,
			    ZFREE_STATFAIL | ZFREE_STATFREE);
			return (NULL);
		}
	}
#ifdef INVARIANTS
	uma_dbg_alloc(zone, slab, item);
#endif
	if (flags & M_ZERO)
		bzero(item, zone->uz_size);

	return (item);
}
2523
2524/* See uma.h */
2525void
2526uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2527{
2528 uma_cache_t cache;
2529 uma_bucket_t bucket;

--- 113 unchanged lines hidden (view full) ---

2643 (cache->uc_allocbucket->ub_cnt <
2644 cache->uc_freebucket->ub_cnt)) {
2645 ZONE_UNLOCK(zone);
2646 goto zfree_start;
2647 }
2648 }
2649
2650 /* Since we have locked the zone we may as well send back our stats */
2651 zone->uz_allocs += cache->uc_allocs;
2652 cache->uc_allocs = 0;
2653 zone->uz_frees += cache->uc_frees;
2654 cache->uc_frees = 0;
2655
2656 bucket = cache->uc_freebucket;
2657 cache->uc_freebucket = NULL;
2658
2659 /* Can we throw this on the zone full list? */
2660 if (bucket != NULL) {
2661#ifdef UMA_DEBUG_ALLOC

--- 32 unchanged lines hidden (view full) ---

2694 ZONE_UNLOCK(zone);
2695 goto zfree_restart;
2696 }
2697
2698 /*
2699 * If nothing else caught this, we'll just do an internal free.
2700 */
2701zfree_internal:
2702 zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2703
2704 return;
2705}
2706
/*
 * Frees an item to an INTERNAL zone or allocates a free bucket
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 *	flags  ZFREE_STATFAIL / ZFREE_STATFREE statistics updates
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata,
    enum zfreeskip skip, int flags)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;
	uint8_t freei;
	int clearfull;

#ifdef INVARIANTS
	if (skip == SKIP_NONE) {
		if (zone->uz_flags & UMA_ZONE_MALLOC)
			uma_dbg_free(zone, udata, item);
		else
			uma_dbg_free(zone, NULL, item);
	}
#endif
	/* SKIP_NONE runs both hooks; SKIP_DTOR runs only the fini. */
	if (skip < SKIP_DTOR && zone->uz_dtor)
		zone->uz_dtor(item, zone->uz_size, udata);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	ZONE_LOCK(zone);

	if (flags & ZFREE_STATFAIL)
		zone->uz_fails++;
	if (flags & ZFREE_STATFREE)
		zone->uz_frees++;

	if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
		/*
		 * Locate the slab header from the item address; the zone
		 * lock is the keg lock here (uz_lock = &uk_lock).
		 */
		mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
		keg = zone_first_keg(zone); /* Must only be one. */
		if (zone->uz_flags & UMA_ZONE_HASH) {
			slab = hash_sfind(&keg->uk_hash, mem);
		} else {
			mem += keg->uk_pgoff;
			slab = (uma_slab_t)mem;
		}
	} else {
		/* This prevents redundant lookups via free(). */
		if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
			slab = (uma_slab_t)udata;
		else
			slab = vtoslab((vm_offset_t)item);
		keg = slab->us_keg;
		/* Trade the zone lock for this slab's keg lock. */
		keg_relock(keg, zone);
	}
	MPASS(keg == slab->us_keg);

	/* Do we need to remove from any lists? */
	if (slab->us_freecount+1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
	}

	/* Slab management. */
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
	slab->us_freecount++;

	/* Keg statistics. */
	keg->uk_free++;

	clearfull = 0;
	if (keg->uk_flags & UMA_ZFLAG_FULL) {
		if (keg->uk_pages < keg->uk_maxpages) {
			keg->uk_flags &= ~UMA_ZFLAG_FULL;
			clearfull = 1;
		}

		/*
		 * We can handle one more allocation. Since we're
		 * clearing ZFLAG_FULL, wake up all procs blocked
		 * on pages. This should be uncommon, so keeping this
		 * simple for now (rather than adding count of blocked
		 * threads etc).
		 */
		wakeup(keg);
	}
	if (clearfull) {
		/* Propagate the cleared FULL state to the zone as well. */
		zone_relock(zone, keg);
		zone->uz_flags &= ~UMA_ZFLAG_FULL;
		wakeup(zone);
		ZONE_UNLOCK(zone);
	} else
		KEG_UNLOCK(keg);

}
2809
2810/* See uma.h */
2811int
2812uma_zone_set_max(uma_zone_t zone, int nitems)
2813{
2814 uma_keg_t keg;
2815
2816 ZONE_LOCK(zone);
2817 keg = zone_first_keg(zone);
2818 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2819 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2820 keg->uk_maxpages += keg->uk_ppera;
2821 nitems = keg->uk_maxpages * keg->uk_ipers;
2822 ZONE_UNLOCK(zone);
2823
2824 return (nitems);
2825}
2826
2827/* See uma.h */
2828int
2829uma_zone_get_max(uma_zone_t zone)
2830{
2831 int nitems;
2832 uma_keg_t keg;
2833
2834 ZONE_LOCK(zone);
2835 keg = zone_first_keg(zone);
2836 nitems = keg->uk_maxpages * keg->uk_ipers;
2837 ZONE_UNLOCK(zone);
2838
2839 return (nitems);
2840}
2841
2842/* See uma.h */
2843void

--- 31 unchanged lines hidden (view full) ---

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	/* The init hook may only change before any pages are allocated. */
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	keg->uk_init = uminit;
	ZONE_UNLOCK(zone);
}
2888
/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	/* The fini hook may only change before any pages are allocated. */
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_fini on non-empty keg"));
	keg->uk_fini = fini;
	ZONE_UNLOCK(zone);
}
2902
2903/* See uma.h */
2904void

--- 17 unchanged lines hidden (view full) ---

2922 ZONE_UNLOCK(zone);
2923}
2924
/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{

	/* Replace the page-release routine on the zone's first keg. */
	ZONE_LOCK(zone);
	zone_first_keg(zone)->uk_freef = freef;
	ZONE_UNLOCK(zone);
}
2935
2936/* See uma.h */
2937/* XXX uk_allocf is not actually used with the zone locked */
2938void
2939uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2940{

--- 10 unchanged lines hidden (view full) ---

2951int
2952uma_zone_reserve_kva(uma_zone_t zone, int count)
2953{
2954 uma_keg_t keg;
2955 vm_offset_t kva;
2956 int pages;
2957
2958 keg = zone_first_keg(zone);
2959 pages = count / keg->uk_ipers;
2960
2961 if (pages * keg->uk_ipers < count)
2962 pages++;
2963
2964#ifdef UMA_MD_SMALL_ALLOC
2965 if (keg->uk_ppera > 1) {
2966#else

--- 22 unchanged lines hidden (view full) ---

2989void
2990uma_prealloc(uma_zone_t zone, int items)
2991{
2992 int slabs;
2993 uma_slab_t slab;
2994 uma_keg_t keg;
2995
2996 keg = zone_first_keg(zone);
2997 ZONE_LOCK(zone);
2998 slabs = items / keg->uk_ipers;
2999 if (slabs * keg->uk_ipers < items)
3000 slabs++;
3001 while (slabs > 0) {
3002 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3003 if (slab == NULL)
3004 break;

--- 73 unchanged lines hidden (view full) ---

3078 return (NULL);
3079 mem = page_alloc(NULL, size, &flags, wait);
3080 if (mem) {
3081 vsetslab((vm_offset_t)mem, slab);
3082 slab->us_data = mem;
3083 slab->us_flags = flags | UMA_SLAB_MALLOC;
3084 slab->us_size = size;
3085 } else {
3086 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
3087 ZFREE_STATFAIL | ZFREE_STATFREE);
3088 }
3089
3090 return (mem);
3091}
3092
void
uma_large_free(uma_slab_t slab)
{
	/*
	 * Re-tag the pages with kmem_object before releasing them —
	 * presumably undoing the vsetslab() done at allocation time;
	 * confirm against uma_large_malloc().
	 */
	vsetobj((vm_offset_t)slab->us_data, kmem_object);
	page_free(slab->us_data, slab->us_size, slab->us_flags);
	/* Finally return the slab header to the internal slab zone. */
	zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
}
3100
void
uma_print_stats(void)
{
	/* Dump diagnostic state for every zone in the system. */
	zone_foreach(uma_print_zone);
}
3106

--- 258 unchanged lines hidden ---