Deleted Added
sdiff udiff text old ( 251983 ) new ( 252040 )
full compact
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 34 unchanged lines hidden (view full) ---

43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 252040 2013-06-20 19:08:12Z jeff $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59

--- 181 unchanged lines hidden (view full) ---

241static int hash_expand(struct uma_hash *, struct uma_hash *);
242static void hash_free(struct uma_hash *hash);
243static void uma_timeout(void *);
244static void uma_startup3(void);
245static void *zone_alloc_item(uma_zone_t, void *, int);
246static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
247static void bucket_enable(void);
248static void bucket_init(void);
249static uma_bucket_t bucket_alloc(uma_zone_t zone, int);
250static void bucket_free(uma_zone_t zone, uma_bucket_t);
251static void bucket_zone_drain(void);
252static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, int flags);
253static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
254static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
255static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
256static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
257static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
258 uma_fini fini, int align, uint32_t flags);
259static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
260static void zone_release(uma_zone_t zone, void **bucket, int cnt);
261
262void uma_print_zone(uma_zone_t);
263void uma_print_stats(void);
264static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
265static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
266

--- 78 unchanged lines hidden (view full) ---

345 for (; ubz->ubz_entries != 0; ubz++)
346 if (ubz->ubz_maxsize < size)
347 break;
348 ubz--;
349 return (ubz->ubz_entries);
350}
351
352static uma_bucket_t
353bucket_alloc(uma_zone_t zone, int flags)
354{
355 struct uma_bucket_zone *ubz;
356 uma_bucket_t bucket;
357
358 /*
359 * This is to stop us from allocating per cpu buckets while we're
360 * running out of vm.boot_pages. Otherwise, we would exhaust the
361 * boot pages. This also prevents us from allocating buckets in
362 * low memory situations.
363 */
364 if (bucketdisable)
365 return (NULL);
366
367 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
368 flags |= M_NOVM;
369 ubz = bucket_zone_lookup(zone->uz_count);
370 bucket = uma_zalloc(ubz->ubz_zone, flags);
371 if (bucket) {
372#ifdef INVARIANTS
373 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
374#endif
375 bucket->ub_cnt = 0;
376 bucket->ub_entries = ubz->ubz_entries;
377 }
378
379 return (bucket);
380}
381
382static void
383bucket_free(uma_zone_t zone, uma_bucket_t bucket)
384{
385 struct uma_bucket_zone *ubz;
386
387 KASSERT(bucket->ub_cnt == 0,
388 ("bucket_free: Freeing a non free bucket."));
389 ubz = bucket_zone_lookup(bucket->ub_entries);
390 uma_zfree(ubz->ubz_zone, bucket);
391}

--- 265 unchanged lines hidden (view full) ---

657 * it is used elsewhere. Should the tear-down path be made special
658 * there in some form?
659 */
660 CPU_FOREACH(cpu) {
661 cache = &zone->uz_cpu[cpu];
662 bucket_drain(zone, cache->uc_allocbucket);
663 bucket_drain(zone, cache->uc_freebucket);
664 if (cache->uc_allocbucket != NULL)
665 bucket_free(zone, cache->uc_allocbucket);
666 if (cache->uc_freebucket != NULL)
667 bucket_free(zone, cache->uc_freebucket);
668 cache->uc_allocbucket = cache->uc_freebucket = NULL;
669 }
670 ZONE_LOCK(zone);
671 bucket_cache_drain(zone);
672 ZONE_UNLOCK(zone);
673}
674
675/*

--- 7 unchanged lines hidden (view full) ---

683 /*
684 * Drain the bucket queues and free the buckets, we just keep two per
685 * cpu (alloc/free).
686 */
687 while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
688 LIST_REMOVE(bucket, ub_link);
689 ZONE_UNLOCK(zone);
690 bucket_drain(zone, bucket);
691 bucket_free(zone, bucket);
692 ZONE_LOCK(zone);
693 }
694}
695
696static void
697keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
698{
699 uint8_t *mem;

--- 96 unchanged lines hidden (view full) ---

796 * is the only call that knows the structure will still be available
797 * when it wakes up.
798 */
799 ZONE_LOCK(zone);
800 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
801 if (waitok == M_NOWAIT)
802 goto out;
803 mtx_unlock(&uma_mtx);
804 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
805 mtx_lock(&uma_mtx);
806 }
807 zone->uz_flags |= UMA_ZFLAG_DRAINING;
808 bucket_cache_drain(zone);
809 ZONE_UNLOCK(zone);
810 /*
811 * The DRAINING flag protects us from being freed while
812 * we're running. Normally the uma_mtx would protect us but we

--- 557 unchanged lines hidden (view full) ---

1370 if (booted < UMA_STARTUP2)
1371 keg->uk_allocf = startup_alloc;
1372#endif
1373 } else if (booted < UMA_STARTUP2 &&
1374 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1375 keg->uk_allocf = startup_alloc;
1376
1377 /*
1378 * Initialize keg's lock
1379 */
1380 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1381
1382 /*
1383 * If we're putting the slab header in the actual page we need to
1384 * figure out where in each page it goes. This calculates a right
1385 * justified offset into the memory on an ALIGN_PTR boundary.
1386 */
1387 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1388 u_int totsize;

--- 72 unchanged lines hidden (view full) ---

1461 zone->uz_fails = 0;
1462 zone->uz_sleeps = 0;
1463 zone->uz_count = 0;
1464 zone->uz_flags = 0;
1465 zone->uz_warning = NULL;
1466 timevalclear(&zone->uz_ratecheck);
1467 keg = arg->keg;
1468
1469 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1470
1471 /*
1472 * This is a pure cache zone, no kegs.
1473 */
1474 if (arg->import) {
1475 zone->uz_size = arg->size;
1476 zone->uz_import = arg->import;
1477 zone->uz_release = arg->release;
1478 zone->uz_arg = arg->arg;
1479 zone->uz_lockptr = &zone->uz_lock;
1480 goto out;
1481 }
1482
1483 /*
1484 * Use the regular zone/keg/slab allocator.
1485 */
1486 zone->uz_import = (uma_import)zone_import;
1487 zone->uz_release = (uma_release)zone_release;
1488 zone->uz_arg = zone;
1489
1490 if (arg->flags & UMA_ZONE_SECONDARY) {
1491 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1492 zone->uz_init = arg->uminit;
1493 zone->uz_fini = arg->fini;
1494 zone->uz_lockptr = &keg->uk_lock;
1495 zone->uz_flags |= UMA_ZONE_SECONDARY;
1496 mtx_lock(&uma_mtx);
1497 ZONE_LOCK(zone);
1498 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1499 if (LIST_NEXT(z, uz_link) == NULL) {
1500 LIST_INSERT_AFTER(z, zone, uz_link);
1501 break;
1502 }

--- 21 unchanged lines hidden (view full) ---

1524 return (error);
1525 }
1526
1527 /*
1528 * Link in the first keg.
1529 */
1530 zone->uz_klink.kl_keg = keg;
1531 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1532 zone->uz_lockptr = &keg->uk_lock;
1533 zone->uz_size = keg->uk_size;
1534 zone->uz_flags |= (keg->uk_flags &
1535 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1536
1537 /*
1538 * Some internal zones don't have room allocated for the per cpu
1539 * caches. If we're internal, bail out here.
1540 */
1541 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1542 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1543 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1544 return (0);
1545 }
1546
1547out:
1548 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1549 zone->uz_count = bucket_select(zone->uz_size);
1550 else
1551 zone->uz_count = BUCKET_MAX;
1552
1553 return (0);
1554}
1555
1556/*
1557 * Keg header dtor. This frees all data, destroys locks, frees the hash

--- 64 unchanged lines hidden (view full) ---

1622 * We only destroy kegs from non secondary zones.
1623 */
1624 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1625 mtx_lock(&uma_mtx);
1626 LIST_REMOVE(keg, uk_link);
1627 mtx_unlock(&uma_mtx);
1628 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1629 }
1630 ZONE_LOCK_FINI(zone);
1631}
1632
1633/*
1634 * Traverses every zone in the system and calls a callback
1635 *
1636 * Arguments:
1637 * zfunc A pointer to a function which accepts a zone
1638 * as an argument.

--- 207 unchanged lines hidden (view full) ---

1846 args.keg = keg;
1847
1848 /* XXX Attaches only one keg of potentially many. */
1849 return (zone_alloc_item(zones, &args, M_WAITOK));
1850}
1851
1852/* See uma.h */
1853uma_zone_t
1854uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1855 uma_init zinit, uma_fini zfini, uma_import zimport,
1856 uma_release zrelease, void *arg, int flags)
1857{
1858 struct uma_zctor_args args;
1859
1860 memset(&args, 0, sizeof(args));
1861 args.name = name;
1862 args.size = size;
1863 args.ctor = ctor;
1864 args.dtor = dtor;
1865 args.uminit = zinit;
1866 args.fini = zfini;
1867 args.import = zimport;
1868 args.release = zrelease;
1869 args.arg = arg;
1870 args.align = 0;
1871 args.flags = flags;
1872
1873 return (zone_alloc_item(zones, &args, M_WAITOK));
1874}
1875
1876static void
1877zone_lock_pair(uma_zone_t a, uma_zone_t b)
1878{
1879 if (a < b) {
1880 ZONE_LOCK(a);
1881 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1882 } else {
1883 ZONE_LOCK(b);
1884 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1885 }
1886}
1887
1888static void
1889zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1890{
1891
1892 ZONE_UNLOCK(a);

--- 175 unchanged lines hidden (view full) ---

2068
2069 /*
2070 * Discard any empty allocation bucket while we hold no locks.
2071 */
2072 bucket = cache->uc_allocbucket;
2073 cache->uc_allocbucket = NULL;
2074 critical_exit();
2075 if (bucket != NULL)
2076 bucket_free(zone, bucket);
2077
2078 /* Short-circuit for zones without buckets and low memory. */
2079 if (zone->uz_count == 0 || bucketdisable)
2080 goto zalloc_item;
2081
2082 /*
2083 * Attempt to retrieve the item from the per-CPU cache has failed, so
2084 * we must go back to the zone. This requires the zone lock, so we

--- 153 unchanged lines hidden (view full) ---

2238 * could have while we were unlocked. Check again before we
2239 * fail.
2240 */
2241 flags |= M_NOVM;
2242 }
2243 return (slab);
2244}
2245
2246static uma_slab_t
2247zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2248{
2249 uma_slab_t slab;
2250
2251 if (keg == NULL) {
2252 keg = zone_first_keg(zone);
2253 KEG_LOCK(keg);
2254 }
2255
2256 for (;;) {
2257 slab = keg_fetch_slab(keg, zone, flags);
2258 if (slab)
2259 return (slab);
2260 if (flags & (M_NOWAIT | M_NOVM))
2261 break;
2262 }
2263 KEG_UNLOCK(keg);
2264 return (NULL);
2265}
2266
2267/*
2268 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2269 * with the keg locked. On NULL no lock is held.
2270 *
2271 * The last pointer is used to seed the search. It is not required.
2272 */
2273static uma_slab_t
2274zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2275{
2276 uma_klink_t klink;
2277 uma_slab_t slab;

--- 7 unchanged lines hidden (view full) ---

2285 * as well. We don't want to block if we can find a provider
2286 * without blocking.
2287 */
2288 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2289 /*
2290 * Use the last slab allocated as a hint for where to start
2291 * the search.
2292 */
2293 if (last != NULL) {
2294 slab = keg_fetch_slab(last, zone, flags);
2295 if (slab)
2296 return (slab);
2297 KEG_UNLOCK(last);
2298 }
2299 /*
2300 * Loop until we have a slab incase of transient failures
2301 * while M_WAITOK is specified. I'm not sure this is 100%
2302 * required but we've done it for so long now.
2303 */
2304 for (;;) {
2305 empty = 0;
2306 full = 0;
2307 /*
2308 * Search the available kegs for slabs. Be careful to hold the
2309 * correct lock while calling into the keg layer.
2310 */
2311 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2312 keg = klink->kl_keg;
2313 KEG_LOCK(keg);
2314 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2315 slab = keg_fetch_slab(keg, zone, flags);
2316 if (slab)
2317 return (slab);
2318 }
2319 if (keg->uk_flags & UMA_ZFLAG_FULL)
2320 full++;
2321 else
2322 empty++;
2323 KEG_UNLOCK(keg);
2324 }
2325 if (rflags & (M_NOWAIT | M_NOVM))
2326 break;
2327 flags = rflags;
2328 /*
2329 * All kegs are full. XXX We can't atomically check all kegs
2330 * and sleep so just sleep for a short period and retry.
2331 */
2332 if (full && !empty) {
2333 ZONE_LOCK(zone);
2334 zone->uz_flags |= UMA_ZFLAG_FULL;
2335 zone->uz_sleeps++;
2336 zone_log_warning(zone);
2337 msleep(zone, zone->uz_lockptr, PVM,
2338 "zonelimit", hz/100);
2339 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2340 ZONE_UNLOCK(zone);
2341 continue;
2342 }
2343 }
2344 return (NULL);
2345}
2346
2347static void *
2348slab_alloc_item(uma_keg_t keg, uma_slab_t slab)

--- 21 unchanged lines hidden (view full) ---

2370
2371static int
2372zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2373{
2374 uma_slab_t slab;
2375 uma_keg_t keg;
2376 int i;
2377
2378 slab = NULL;
2379 keg = NULL;
2380 /* Try to keep the buckets totally full */
2381 for (i = 0; i < max; ) {
2382 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2383 break;
2384 keg = slab->us_keg;
2385 while (slab->us_freecount && i < max)
2386 bucket[i++] = slab_alloc_item(keg, slab);
2387
2388 /* Don't block on the next fill */
2389 flags &= ~M_WAITOK;
2390 flags |= M_NOWAIT;
2391 }
2392 if (slab != NULL)
2393 KEG_UNLOCK(keg);
2394
2395 return i;
2396}
2397
2398static uma_bucket_t
2399zone_alloc_bucket(uma_zone_t zone, int flags)
2400{
2401 uma_bucket_t bucket;
2402 int max;
2403
2404 bucket = bucket_alloc(zone, M_NOWAIT | (flags & M_NOVM));
2405 if (bucket == NULL)
2406 goto out;
2407
2408 max = MIN(bucket->ub_entries, zone->uz_count);
2409 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2410 max, flags);
2411
2412 /*
2413 * Initialize the memory if necessary.
2414 */
2415 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2416 int i;
2417
2418 for (i = 0; i < bucket->ub_cnt; i++)
2419 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2420 flags) != 0)
2421 break;
2422 /*
2423 * If we couldn't initialize the whole bucket, put the
2424 * rest back onto the freelist.
2425 */
2426 if (i != bucket->ub_cnt) {
2427 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2428 bucket->ub_cnt - i);
2429#ifdef INVARIANTS
2430 bzero(&bucket->ub_bucket[i],
2431 sizeof(void *) * (bucket->ub_cnt - i));
2432#endif
2433 bucket->ub_cnt = i;
2434 }
2435 }
2436
2437out:
2438 if (bucket == NULL || bucket->ub_cnt == 0) {
2439 if (bucket != NULL)
2440 bucket_free(zone, bucket);
2441 atomic_add_long(&zone->uz_fails, 1);
2442 return (NULL);
2443 }
2444
2445 return (bucket);
2446}
2447
2448/*

--- 55 unchanged lines hidden (view full) ---

2504}
2505
2506/* See uma.h */
2507void
2508uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2509{
2510 uma_cache_t cache;
2511 uma_bucket_t bucket;
2512 int cpu;
2513
2514#ifdef UMA_DEBUG_ALLOC_1
2515 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2516#endif
2517 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2518 zone->uz_name);
2519

--- 108 unchanged lines hidden (view full) ---

2628 critical_exit();
2629
2630 /* And the zone.. */
2631 ZONE_UNLOCK(zone);
2632
2633#ifdef UMA_DEBUG_ALLOC
2634 printf("uma_zfree: Allocating new free bucket.\n");
2635#endif
2636 bucket = bucket_alloc(zone, M_NOWAIT);
2637 if (bucket) {
2638 critical_enter();
2639 cpu = curcpu;
2640 cache = &zone->uz_cpu[cpu];
2641 if (cache->uc_freebucket == NULL) {
2642 cache->uc_freebucket = bucket;
2643 goto zfree_start;
2644 }
2645 /*
2646 * We lost the race, start over. We have to drop our
2647 * critical section to free the bucket.
2648 */
2649 critical_exit();
2650 bucket_free(zone, bucket);
2651 goto zfree_restart;
2652 }
2653
2654 /*
2655 * If nothing else caught this, we'll just do an internal free.
2656 */
2657zfree_item:
2658 zone_free_item(zone, item, udata, SKIP_DTOR);

--- 33 unchanged lines hidden (view full) ---

2692 void *item;
2693 uma_slab_t slab;
2694 uma_keg_t keg;
2695 uint8_t *mem;
2696 int clearfull;
2697 int i;
2698
2699 clearfull = 0;
2700 keg = zone_first_keg(zone);
2701 KEG_LOCK(keg);
2702 for (i = 0; i < cnt; i++) {
2703 item = bucket[i];
2704 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2705 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2706 if (zone->uz_flags & UMA_ZONE_HASH) {
2707 slab = hash_sfind(&keg->uk_hash, mem);
2708 } else {
2709 mem += keg->uk_pgoff;

--- 19 unchanged lines hidden (view full) ---

2729 * clearing ZFLAG_FULL, wake up all procs blocked
2730 * on pages. This should be uncommon, so keeping this
2731 * simple for now (rather than adding count of blocked
2732 * threads etc).
2733 */
2734 wakeup(keg);
2735 }
2736 }
2737 KEG_UNLOCK(keg);
2738 if (clearfull) {
2739 ZONE_LOCK(zone);
2740 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2741 wakeup(zone);
2742 ZONE_UNLOCK(zone);
2743 }
2744
2745}
2746
2747/*
2748 * Frees a single item to any zone.
2749 *
2750 * Arguments:
2751 * zone The zone to free to

--- 27 unchanged lines hidden (view full) ---

2779int
2780uma_zone_set_max(uma_zone_t zone, int nitems)
2781{
2782 uma_keg_t keg;
2783
2784 keg = zone_first_keg(zone);
2785 if (keg == NULL)
2786 return (0);
2787 KEG_LOCK(keg);
2788 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2789 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2790 keg->uk_maxpages += keg->uk_ppera;
2791 nitems = keg->uk_maxpages * keg->uk_ipers;
2792 KEG_UNLOCK(keg);
2793
2794 return (nitems);
2795}
2796
2797/* See uma.h */
2798int
2799uma_zone_get_max(uma_zone_t zone)
2800{
2801 int nitems;
2802 uma_keg_t keg;
2803
2804 keg = zone_first_keg(zone);
2805 if (keg == NULL)
2806 return (0);
2807 KEG_LOCK(keg);
2808 nitems = keg->uk_maxpages * keg->uk_ipers;
2809 KEG_UNLOCK(keg);
2810
2811 return (nitems);
2812}
2813
2814/* See uma.h */
2815void
2816uma_zone_set_warning(uma_zone_t zone, const char *warning)
2817{

--- 27 unchanged lines hidden (view full) ---

2845}
2846
2847/* See uma.h */
2848void
2849uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2850{
2851 uma_keg_t keg;
2852
2853 keg = zone_first_keg(zone);
2854 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2855 KEG_LOCK(keg);
2856 KASSERT(keg->uk_pages == 0,
2857 ("uma_zone_set_init on non-empty keg"));
2858 keg->uk_init = uminit;
2859 KEG_UNLOCK(keg);
2860}
2861
2862/* See uma.h */
2863void
2864uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2865{
2866 uma_keg_t keg;
2867
2868 keg = zone_first_keg(zone);
2869 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2870 KEG_LOCK(keg);
2871 KASSERT(keg->uk_pages == 0,
2872 ("uma_zone_set_fini on non-empty keg"));
2873 keg->uk_fini = fini;
2874 KEG_UNLOCK(keg);
2875}
2876
2877/* See uma.h */
/*
 * Install a zone-level item initializer (uz_init).  uz_init is applied
 * to items after they are imported from the keg (see zone_alloc_bucket).
 * Asserts that the zone has not yet allocated any pages.
 */
2878void
2879uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2880{
2881
2882 ZONE_LOCK(zone);
 /*
  * NOTE(review): zone_first_keg() is dereferenced without a NULL check
  * here (unlike uma_zone_set_init); presumably keg-less cache zones
  * never call this -- confirm.  The KASSERT vanishes without INVARIANTS.
  */
2883 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2884 ("uma_zone_set_zinit on non-empty keg"));
2885 zone->uz_init = zinit;
2886 ZONE_UNLOCK(zone);
2887}
2888
2889/* See uma.h */
/*
 * Install a zone-level item finalizer (uz_fini), the counterpart of
 * uma_zone_set_zinit.  Asserts that the zone has not yet allocated
 * any pages.
 */
2890void
2891uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2892{
2893
2894 ZONE_LOCK(zone);
 /*
  * NOTE(review): zone_first_keg() is dereferenced without a NULL check;
  * presumably keg-less cache zones never call this -- confirm.  The
  * KASSERT vanishes without INVARIANTS.
  */
2895 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2896 ("uma_zone_set_zfini on non-empty keg"));
2897 zone->uz_fini = zfini;
2898 ZONE_UNLOCK(zone);
2899}
2900
2901/* See uma.h */
2902/* XXX uk_freef is not actually used with the zone locked */
2903void
2904uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2905{
2906 uma_keg_t keg;
2907
2908 keg = zone_first_keg(zone);
2909 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2910 KEG_LOCK(keg);
2911 keg->uk_freef = freef;
2912 KEG_UNLOCK(keg);
2913}
2914
2915/* See uma.h */
2916/* XXX uk_allocf is not actually used with the zone locked */
2917void
2918uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2919{
2920 uma_keg_t keg;
2921
2922 keg = zone_first_keg(zone);
2923 KEG_LOCK(keg);
2924 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2925 keg->uk_allocf = allocf;
2926 KEG_UNLOCK(keg);
2927}
2928
2929/* See uma.h */
2930int
2931uma_zone_reserve_kva(uma_zone_t zone, int count)
2932{
2933 uma_keg_t keg;
2934 vm_offset_t kva;

--- 12 unchanged lines hidden (view full) ---

2947#else
2948 if (1) {
2949#endif
2950 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2951 if (kva == 0)
2952 return (0);
2953 } else
2954 kva = 0;
2955 KEG_LOCK(keg);
2956 keg->uk_kva = kva;
2957 keg->uk_offset = 0;
2958 keg->uk_maxpages = pages;
2959#ifdef UMA_MD_SMALL_ALLOC
2960 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
2961#else
2962 keg->uk_allocf = noobj_alloc;
2963#endif
2964 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2965 KEG_UNLOCK(keg);
2966
2967 return (1);
2968}
2969
2970/* See uma.h */
2971void
2972uma_prealloc(uma_zone_t zone, int items)
2973{
2974 int slabs;
2975 uma_slab_t slab;
2976 uma_keg_t keg;
2977
2978 keg = zone_first_keg(zone);
2979 if (keg == NULL)
2980 return;
2981 KEG_LOCK(keg);
2982 slabs = items / keg->uk_ipers;
2983 if (slabs * keg->uk_ipers < items)
2984 slabs++;
2985 while (slabs > 0) {
2986 slab = keg_alloc_slab(keg, zone, M_WAITOK);
2987 if (slab == NULL)
2988 break;
2989 MPASS(slab->us_keg == keg);
2990 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2991 slabs--;
2992 }
2993 KEG_UNLOCK(keg);
2994}
2995
2996/* See uma.h */
2997uint32_t *
2998uma_find_refcnt(uma_zone_t zone, void *item)
2999{
3000 uma_slabrefcnt_t slabref;
3001 uma_slab_t slab;

--- 346 unchanged lines hidden ---