sys/vm/uma_core.c: diff between old (251983) and new (252040)
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 34 unchanged lines hidden ---

43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 251983 2013-06-19 02:30:32Z jeff $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59

--- 181 unchanged lines hidden ---

241static int hash_expand(struct uma_hash *, struct uma_hash *);
242static void hash_free(struct uma_hash *hash);
243static void uma_timeout(void *);
244static void uma_startup3(void);
245static void *zone_alloc_item(uma_zone_t, void *, int);
246static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
247static void bucket_enable(void);
248static void bucket_init(void);
249static uma_bucket_t bucket_alloc(int, int);
250static void bucket_free(uma_bucket_t);
251static void bucket_zone_drain(void);
252static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, int flags);
253static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
254static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
255static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
256static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
257static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
258 uma_fini fini, int align, uint32_t flags);
259static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
260static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
261static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
262static void zone_release(uma_zone_t zone, void **bucket, int cnt);
263
264void uma_print_zone(uma_zone_t);
265void uma_print_stats(void);
266static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
267static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
268

--- 78 unchanged lines hidden ---

347 for (; ubz->ubz_entries != 0; ubz++)
348 if (ubz->ubz_maxsize < size)
349 break;
350 ubz--;
351 return (ubz->ubz_entries);
352}
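
A minimal userspace sketch of the table walk above, with invented values: the classes are ordered by decreasing maxsize, so the loop stops at the first class too small for the item and steps back to the class with the most entries that still fits. (An item larger than the first class's maxsize would walk off the front of the table; the sketch simply asserts that case away.)

#include <assert.h>
#include <stddef.h>

struct bucket_class {
	int	entries;	/* slots per bucket; 0 ends the table */
	size_t	maxsize;	/* largest item size this class serves */
};

/* Invented values: smaller items get buckets with more slots. */
static const struct bucket_class classes[] = {
	{ 4,   4096 },
	{ 16,  1024 },
	{ 64,   256 },
	{ 0,      0 },		/* sentinel */
};

static int
class_entries(size_t size)
{
	const struct bucket_class *bc = classes;

	assert(size <= bc->maxsize);	/* oversize handled elsewhere */
	for (; bc->entries != 0; bc++)
		if (bc->maxsize < size)
			break;
	bc--;
	return (bc->entries);		/* e.g. class_entries(300) == 16 */
}
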
353
354static uma_bucket_t
355bucket_alloc(int entries, int bflags)
356{
357 struct uma_bucket_zone *ubz;
358 uma_bucket_t bucket;
359
360 /*
361 * Don't allocate per-cpu buckets while we're still drawing from
362 * vm.boot_pages; otherwise we would exhaust the boot pages. This
363 * also prevents us from allocating buckets in low memory
364 * situations.
365 */
366 if (bucketdisable)
367 return (NULL);
368
369 ubz = bucket_zone_lookup(entries);
370 bucket = uma_zalloc(ubz->ubz_zone, bflags);
371 if (bucket) {
372#ifdef INVARIANTS
373 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
374#endif
375 bucket->ub_cnt = 0;
376 bucket->ub_entries = ubz->ubz_entries;
377 }
378
379 return (bucket);
380}
381
382static void
383bucket_free(uma_bucket_t bucket)
384{
385 struct uma_bucket_zone *ubz;
386
387 KASSERT(bucket->ub_cnt == 0,
388 ("bucket_free: Freeing a non free bucket."));
389 ubz = bucket_zone_lookup(bucket->ub_entries);
390 uma_zfree(ubz->ubz_zone, bucket);
391}

--- 265 unchanged lines hidden ---

657 * it is used elsewhere. Should the tear-down path be made special
658 * there in some form?
659 */
660 CPU_FOREACH(cpu) {
661 cache = &zone->uz_cpu[cpu];
662 bucket_drain(zone, cache->uc_allocbucket);
663 bucket_drain(zone, cache->uc_freebucket);
664 if (cache->uc_allocbucket != NULL)
665 bucket_free(cache->uc_allocbucket);
666 if (cache->uc_freebucket != NULL)
667 bucket_free(cache->uc_freebucket);
668 cache->uc_allocbucket = cache->uc_freebucket = NULL;
669 }
670 ZONE_LOCK(zone);
671 bucket_cache_drain(zone);
672 ZONE_UNLOCK(zone);
673}
674
675/*

--- 7 unchanged lines hidden ---

683 /*
684 * Drain the bucket queues and free the buckets; we keep just two
685 * per cpu (alloc/free).
686 */
687 while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
688 LIST_REMOVE(bucket, ub_link);
689 ZONE_UNLOCK(zone);
690 bucket_drain(zone, bucket);
691 bucket_free(bucket);
692 ZONE_LOCK(zone);
693 }
694}
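
A minimal userspace sketch of the same drain pattern, with a pthread mutex standing in for the zone lock: detach one node at a time under the lock, then do the per-node work with the lock dropped so other threads are not held up behind a slow free.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node	*next;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
drain(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	while ((n = head) != NULL) {
		head = n->next;			/* detach under the lock */
		pthread_mutex_unlock(&list_lock);
		free(n);			/* slow work, lock dropped */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}
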
695
696static void
697keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
698{
699 uint8_t *mem;

--- 96 unchanged lines hidden ---

796 * is the only call that knows the structure will still be available
797 * when it wakes up.
798 */
799 ZONE_LOCK(zone);
800 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
801 if (waitok == M_NOWAIT)
802 goto out;
803 mtx_unlock(&uma_mtx);
804 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
805 mtx_lock(&uma_mtx);
806 }
807 zone->uz_flags |= UMA_ZFLAG_DRAINING;
808 bucket_cache_drain(zone);
809 ZONE_UNLOCK(zone);
810 /*
811 * The DRAINING flag protects us from being freed while
812 * we're running. Normally the uma_mtx would protect us but we

--- 557 unchanged lines hidden ---

1370 if (booted < UMA_STARTUP2)
1371 keg->uk_allocf = startup_alloc;
1372#endif
1373 } else if (booted < UMA_STARTUP2 &&
1374 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1375 keg->uk_allocf = startup_alloc;
1376
1377 /*
1378 * Initialize keg's lock (shared among zones).
1379 */
1380 if (arg->flags & UMA_ZONE_MTXCLASS)
1381 KEG_LOCK_INIT(keg, 1);
1382 else
1383 KEG_LOCK_INIT(keg, 0);
1384
1385 /*
1386 * If we're putting the slab header in the actual page we need to
1387 * figure out where in each page it goes. This calculates a right
1388 * justified offset into the memory on an ALIGN_PTR boundary.
1389 */
1390 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1391 u_int totsize;

--- 72 unchanged lines hidden ---

1464 zone->uz_fails = 0;
1465 zone->uz_sleeps = 0;
1466 zone->uz_count = 0;
1467 zone->uz_flags = 0;
1468 zone->uz_warning = NULL;
1469 timevalclear(&zone->uz_ratecheck);
1470 keg = arg->keg;
1471
1472 /*
1473 * This is a pure cache zone, no kegs.
1474 */
1475 if (arg->import) {
1476 zone->uz_import = arg->import;
1477 zone->uz_release = arg->release;
1478 zone->uz_arg = arg->arg;
1479 zone->uz_count = BUCKET_MAX;
1480 return (0);
1481 }
1482
1483 /*
1484 * Use the regular zone/keg/slab allocator.
1485 */
1486 zone->uz_import = (uma_import)zone_import;
1487 zone->uz_release = (uma_release)zone_release;
1488 zone->uz_arg = zone;
1489
1490 if (arg->flags & UMA_ZONE_SECONDARY) {
1491 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1492 zone->uz_init = arg->uminit;
1493 zone->uz_fini = arg->fini;
1494 zone->uz_lock = &keg->uk_lock;
1495 zone->uz_flags |= UMA_ZONE_SECONDARY;
1496 mtx_lock(&uma_mtx);
1497 ZONE_LOCK(zone);
1498 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1499 if (LIST_NEXT(z, uz_link) == NULL) {
1500 LIST_INSERT_AFTER(z, zone, uz_link);
1501 break;
1502 }

--- 21 unchanged lines hidden ---

1524 return (error);
1525 }
1526
1527 /*
1528 * Link in the first keg.
1529 */
1530 zone->uz_klink.kl_keg = keg;
1531 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1532 zone->uz_lock = &keg->uk_lock;
1533 zone->uz_size = keg->uk_size;
1534 zone->uz_flags |= (keg->uk_flags &
1535 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1536
1537 /*
1538 * Some internal zones don't have room allocated for the per cpu
1539 * caches. If we're internal, bail out here.
1540 */
1541 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1542 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1543 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1544 return (0);
1545 }
1546
1547 if ((keg->uk_flags & UMA_ZONE_MAXBUCKET) == 0)
1548 zone->uz_count = bucket_select(keg->uk_rsize);
1549 else
1550 zone->uz_count = BUCKET_MAX;
1551
1552 return (0);
1553}
1554
1555/*
1556 * Keg header dtor. This frees all data, destroys locks, frees the hash

--- 64 unchanged lines hidden ---

1621 * We only destroy kegs from non secondary zones.
1622 */
1623 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1624 mtx_lock(&uma_mtx);
1625 LIST_REMOVE(keg, uk_link);
1626 mtx_unlock(&uma_mtx);
1627 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1628 }
1629}
1630
1631/*
1632 * Traverses every zone in the system and calls a callback
1633 *
1634 * Arguments:
1635 * zfunc A pointer to a function which accepts a zone
1636 * as an argument.

--- 207 unchanged lines hidden ---

1844 args.keg = keg;
1845
1846 /* XXX Attaches only one keg of potentially many. */
1847 return (zone_alloc_item(zones, &args, M_WAITOK));
1848}
1849
1850/* See uma.h */
1851uma_zone_t
1852uma_zcache_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit,
1853 uma_fini zfini, uma_import zimport, uma_release zrelease,
1854 void *arg, int flags)
1855{
1856 struct uma_zctor_args args;
1857
1858 memset(&args, 0, sizeof(args));
1859 args.name = name;
1860 args.size = 0;
1861 args.ctor = ctor;
1862 args.dtor = dtor;
1863 args.uminit = zinit;
1864 args.fini = zfini;
1865 args.import = zimport;
1866 args.release = zrelease;
1867 args.arg = arg;
1868 args.align = 0;
1869 args.flags = flags;
1870
1871 return (zone_alloc_item(zones, &args, M_WAITOK));
1872}
1873
1874static void
1875zone_lock_pair(uma_zone_t a, uma_zone_t b)
1876{
1877 if (a < b) {
1878 ZONE_LOCK(a);
1879 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1880 } else {
1881 ZONE_LOCK(b);
1882 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1883 }
1884}
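
A minimal userspace sketch of the same deadlock-avoidance idea, with pthread mutexes standing in for the zone locks: both threads take the pair in one global order (here, by address), so opposite argument orders cannot deadlock. The kernel version above also passes MTX_DUPOK to quiet WITNESS about taking two locks of the same class; the sketch assumes a != b.

#include <pthread.h>

static void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a < b) {			/* address order, as above */
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void
unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	pthread_mutex_unlock(b);
}
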
1885
1886static void
1887zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1888{
1889
1890 ZONE_UNLOCK(a);

--- 175 unchanged lines hidden ---

2066
2067 /*
2068 * Discard any empty allocation bucket while we hold no locks.
2069 */
2070 bucket = cache->uc_allocbucket;
2071 cache->uc_allocbucket = NULL;
2072 critical_exit();
2073 if (bucket != NULL)
2074 bucket_free(bucket);
2075
2076 /* Short-circuit for zones without buckets and low memory. */
2077 if (zone->uz_count == 0 || bucketdisable)
2078 goto zalloc_item;
2079
2080 /*
2081 * The attempt to retrieve the item from the per-CPU cache has failed, so
2082 * we must go back to the zone. This requires the zone lock, so we

--- 153 unchanged lines hidden ---

2236 * could have while we were unlocked. Check again before we
2237 * fail.
2238 */
2239 flags |= M_NOVM;
2240 }
2241 return (slab);
2242}
2243
2244static inline void
2245zone_relock(uma_zone_t zone, uma_keg_t keg)
2246{
2247 if (zone->uz_lock != &keg->uk_lock) {
2248 KEG_UNLOCK(keg);
2249 ZONE_LOCK(zone);
2250 }
2251}
2252
2253static inline void
2254keg_relock(uma_keg_t keg, uma_zone_t zone)
2255{
2256 if (zone->uz_lock != &keg->uk_lock) {
2257 ZONE_UNLOCK(zone);
2258 KEG_LOCK(keg);
2259 }
2260}
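
A minimal sketch of the relock handoff above, with pthread mutexes standing in for the zone and keg locks: swap which lock is held, except when both names refer to the same mutex, as they do for a zone that shares its keg's lock.

#include <pthread.h>

static void
relock(pthread_mutex_t *held, pthread_mutex_t *want)
{
	if (held != want) {
		pthread_mutex_unlock(held);
		pthread_mutex_lock(want);
	}
}
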
2261
2262static uma_slab_t
2263zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2264{
2265 uma_slab_t slab;
2266
2267 if (keg == NULL)
2268 keg = zone_first_keg(zone);
2269
2270 for (;;) {
2271 slab = keg_fetch_slab(keg, zone, flags);
2272 if (slab)
2273 return (slab);
2274 if (flags & (M_NOWAIT | M_NOVM))
2275 break;
2276 }
2277 return (NULL);
2278}
2279
2280/*
2281 * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2282 * with the keg locked. Caller must call zone_relock() afterwards if the
2283 * zone lock is required. If NULL is returned, the zone lock is held.
2284 *
2285 * The 'last' pointer seeds the search; it may be NULL.
2286 */
2287static uma_slab_t
2288zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2289{
2290 uma_klink_t klink;
2291 uma_slab_t slab;

--- 7 unchanged lines hidden ---

2299 * as well. We don't want to block if we can find a provider
2300 * without blocking.
2301 */
2302 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2303 /*
2304 * Use the last slab allocated as a hint for where to start
2305 * the search.
2306 */
2307 if (last) {
2308 slab = keg_fetch_slab(last, zone, flags);
2309 if (slab)
2310 return (slab);
2311 zone_relock(zone, last);
2312 last = NULL;
2313 }
2314 /*
2315 * Loop until we have a slab in case of transient failures
2316 * while M_WAITOK is specified. I'm not sure this is 100%
2317 * required but we've done it for so long now.
2318 */
2319 for (;;) {
2320 empty = 0;
2321 full = 0;
2322 /*
2323 * Search the available kegs for slabs. Be careful to hold the
2324 * correct lock while calling into the keg layer.
2325 */
2326 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2327 keg = klink->kl_keg;
2328 keg_relock(keg, zone);
2329 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2330 slab = keg_fetch_slab(keg, zone, flags);
2331 if (slab)
2332 return (slab);
2333 }
2334 if (keg->uk_flags & UMA_ZFLAG_FULL)
2335 full++;
2336 else
2337 empty++;
2338 zone_relock(zone, keg);
2339 }
2340 if (rflags & (M_NOWAIT | M_NOVM))
2341 break;
2342 flags = rflags;
2343 /*
2344 * All kegs are full. XXX We can't atomically check all kegs
2345 * and sleep, so just sleep for a short period and retry.
2346 */
2347 if (full && !empty) {
2348 zone->uz_flags |= UMA_ZFLAG_FULL;
2349 zone->uz_sleeps++;
2350 zone_log_warning(zone);
2351 msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
2352 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2353 continue;
2354 }
2355 }
2356 return (NULL);
2357}
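
A minimal userspace sketch of the multi-keg search strategy, with all names invented: probe every provider without blocking first, so a slow keg cannot starve one that could satisfy the request, and nap briefly only when every provider reports full (a crude mirror of the msleep() above). Like the M_WAITOK case it models, the loop does not give up while blocking is allowed.

#include <stddef.h>
#include <unistd.h>

struct toy_keg {
	int	full;				/* like UMA_ZFLAG_FULL */
	void	*(*fetch)(struct toy_keg *, int canblock);
};

static void *
fetch_multi(struct toy_keg *kegs, int nkegs, int canblock)
{
	void *item;
	int i, full, pass_block;

	pass_block = 0;
	for (;;) {
		full = 0;
		for (i = 0; i < nkegs; i++) {
			if (!kegs[i].full && (item =
			    kegs[i].fetch(&kegs[i], pass_block)) != NULL)
				return (item);
			full += kegs[i].full;
		}
		if (!canblock)
			return (NULL);
		if (full == nkegs)
			usleep(10000);	/* stand-in for msleep() */
		pass_block = 1;		/* later passes may block */
	}
}
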
2358
2359static void *
2360slab_alloc_item(uma_keg_t keg, uma_slab_t slab)

--- 21 unchanged lines hidden ---

2382
2383static int
2384zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2385{
2386 uma_slab_t slab;
2387 uma_keg_t keg;
2388 int i;
2389
2390 ZONE_LOCK(zone);
2391 /* Try to keep the buckets totally full */
2392 slab = NULL;
2393 keg = NULL;
2394 for (i = 0; i < max; ) {
2395 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2396 break;
2397 keg = slab->us_keg;
2398 while (slab->us_freecount && i < max)
2399 bucket[i++] = slab_alloc_item(keg, slab);
2400
2401 /* Don't block on the next fill */
2402 flags &= ~M_WAITOK;
2403 flags |= M_NOWAIT;
2404 }
2405 if (slab != NULL)
2406 KEG_UNLOCK(keg);
2407 else
2408 ZONE_UNLOCK(zone);
2409
2410 return (i);
2411}
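
A runnable userspace sketch of the batching idea in zone_import(), with all names invented: fill the caller's array from successive slabs, and stop allowing the backend to block once the first items are in hand, mirroring the M_WAITOK to M_NOWAIT downgrade above.

#include <stdio.h>

#define TOY_IPERS	4		/* items per toy slab */

struct toy_slab {
	int	freecount;
	void	*item[TOY_IPERS];
};

static struct toy_slab pool[2];
static int next_slab;

static struct toy_slab *
toy_fetch_slab(int canblock)
{
	struct toy_slab *s;

	(void)canblock;			/* a real backend might sleep */
	if (next_slab >= 2)
		return (NULL);
	s = &pool[next_slab++];
	s->freecount = TOY_IPERS;
	for (int j = 0; j < TOY_IPERS; j++)
		s->item[j] = &s->item[j];	/* dummy payload */
	return (s);
}

static int
toy_import(void **bucket, int max)
{
	struct toy_slab *slab;
	int canblock, i;

	canblock = 1;
	for (i = 0; i < max; ) {
		if ((slab = toy_fetch_slab(canblock)) == NULL)
			break;
		while (slab->freecount > 0 && i < max)
			bucket[i++] = slab->item[--slab->freecount];
		canblock = 0;	/* don't sleep just to top off the batch */
	}
	return (i);
}

int
main(void)
{
	void *bucket[6];

	printf("imported %d items\n", toy_import(bucket, 6));	/* 6 */
	return (0);
}
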
2412
2413static uma_bucket_t
2414zone_alloc_bucket(uma_zone_t zone, int flags)
2415{
2416 uma_bucket_t bucket;
2417 int bflags;
2418 int max;
2419
2420 max = zone->uz_count;
2421 bflags = (flags & ~M_WAITOK) | M_NOWAIT;
2422 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2423 bflags |= M_NOVM;
2424 bucket = bucket_alloc(zone->uz_count, bflags);
2425 if (bucket == NULL)
2426 goto out;
2427
2428 max = MIN(bucket->ub_entries, max);
2429 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2430 max, flags);
2431
2432 /*
2433 * Initialize the memory if necessary.
2434 */
2435 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2436 int i;
2437
2438 for (i = 0; i < bucket->ub_cnt; i++)
2439 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2440 flags) != 0)
2441 break;
2442 /*
2443 * If we couldn't initialize the whole bucket, put the
2444 * rest back onto the freelist.
2445 */
2446 if (i != bucket->ub_cnt) {
2447 zone->uz_release(zone->uz_arg, bucket->ub_bucket[i],
2448 bucket->ub_cnt - i);
2449#ifdef INVARIANTS
2450 bzero(&bucket->ub_bucket[i],
2451 sizeof(void *) * (bucket->ub_cnt - i));
2452#endif
2453 bucket->ub_cnt = i;
2454 }
2455 }
2456
2457out:
2458 if (bucket == NULL || bucket->ub_cnt == 0) {
2459 if (bucket != NULL)
2460 bucket_free(bucket);
2461 atomic_add_long(&zone->uz_fails, 1);
2462 return (NULL);
2463 }
2464
2465 return (bucket);
2466}
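
A minimal userspace sketch of the partial-initialization rollback above, with invented names: run a fallible init over the batch and, on the first failure, hand the uninitialized tail back to the caller's release function rather than returning half-constructed items.

#include <stddef.h>

static int
toy_init(void *item)
{
	return (item != NULL ? 0 : -1);	/* stand-in fallible ctor */
}

static int
init_batch(void **items, int cnt, void (*release)(void **, int))
{
	int i;

	for (i = 0; i < cnt; i++)
		if (toy_init(items[i]) != 0)
			break;
	if (i != cnt)
		release(&items[i], cnt - i);	/* give back the tail */
	return (i);			/* number of usable items */
}
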
2467
2468/*

--- 55 unchanged lines hidden ---

2524}
2525
2526/* See uma.h */
2527void
2528uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2529{
2530 uma_cache_t cache;
2531 uma_bucket_t bucket;
2532 int bflags;
2533 int cpu;
2534
2535#ifdef UMA_DEBUG_ALLOC_1
2536 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2537#endif
2538 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2539 zone->uz_name);
2540

--- 108 unchanged lines hidden ---

2649 critical_exit();
2650
2651 /* And the zone.. */
2652 ZONE_UNLOCK(zone);
2653
2654#ifdef UMA_DEBUG_ALLOC
2655 printf("uma_zfree: Allocating new free bucket.\n");
2656#endif
2657 bflags = M_NOWAIT;
2658 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2659 bflags |= M_NOVM;
2660 bucket = bucket_alloc(zone->uz_count, bflags);
2661 if (bucket) {
2662 critical_enter();
2663 cpu = curcpu;
2664 cache = &zone->uz_cpu[cpu];
2665 if (cache->uc_freebucket == NULL) {
2666 cache->uc_freebucket = bucket;
2667 goto zfree_start;
2668 }
2669 /*
2670 * We lost the race, start over. We have to drop our
2671 * critical section to free the bucket.
2672 */
2673 critical_exit();
2674 bucket_free(bucket);
2675 goto zfree_restart;
2676 }
2677
2678 /*
2679 * If nothing else caught this, we'll just do an internal free.
2680 */
2681zfree_item:
2682 zone_free_item(zone, item, udata, SKIP_DTOR);

--- 33 unchanged lines hidden ---

2716 void *item;
2717 uma_slab_t slab;
2718 uma_keg_t keg;
2719 uint8_t *mem;
2720 int clearfull;
2721 int i;
2722
2723 clearfull = 0;
2724 ZONE_LOCK(zone);
2725 keg = zone_first_keg(zone);
2726 for (i = 0; i < cnt; i++) {
2727 item = bucket[i];
2728 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2729 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2730 if (zone->uz_flags & UMA_ZONE_HASH) {
2731 slab = hash_sfind(&keg->uk_hash, mem);
2732 } else {
2733 mem += keg->uk_pgoff;

--- 19 unchanged lines hidden ---

2753 * clearing ZFLAG_FULL, wake up all procs blocked
2754 * on pages. This should be uncommon, so keeping this
2755 * simple for now (rather than adding count of blocked
2756 * threads etc).
2757 */
2758 wakeup(keg);
2759 }
2760 }
2761 zone_relock(zone, keg);
2762 if (clearfull) {
2763 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2764 wakeup(zone);
2765 }
2766 ZONE_UNLOCK(zone);
2767
2768}
2769
2770/*
2771 * Frees a single item to any zone.
2772 *
2773 * Arguments:
2774 * zone The zone to free to

--- 27 unchanged lines hidden ---

2802int
2803uma_zone_set_max(uma_zone_t zone, int nitems)
2804{
2805 uma_keg_t keg;
2806
2807 keg = zone_first_keg(zone);
2808 if (keg == NULL)
2809 return (0);
2810 ZONE_LOCK(zone);
2811 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2812 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2813 keg->uk_maxpages += keg->uk_ppera;
2814 nitems = keg->uk_maxpages * keg->uk_ipers;
2815 ZONE_UNLOCK(zone);
2816
2817 return (nitems);
2818}
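
A worked example of the rounding above, runnable standalone with invented numbers (10 items per slab, 1 page per slab): a request for 25 items is rounded up to whole slabs, so the effective limit the function returns is 30.

#include <stdio.h>

int
main(void)
{
	int nitems = 25, ipers = 10, ppera = 1, maxpages;

	maxpages = (nitems / ipers) * ppera;	/* 2 pages: 20 items */
	if (maxpages * ipers < nitems)		/* 20 < 25: round up */
		maxpages += ppera;
	nitems = maxpages * ipers;
	printf("%d items, %d pages\n", nitems, maxpages);	/* 30, 3 */
	return (0);
}
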
2819
2820/* See uma.h */
2821int
2822uma_zone_get_max(uma_zone_t zone)
2823{
2824 int nitems;
2825 uma_keg_t keg;
2826
2827 keg = zone_first_keg(zone);
2828 if (keg == NULL)
2829 return (0);
2830 ZONE_LOCK(zone);
2831 nitems = keg->uk_maxpages * keg->uk_ipers;
2832 ZONE_UNLOCK(zone);
2833
2834 return (nitems);
2835}
2836
2837/* See uma.h */
2838void
2839uma_zone_set_warning(uma_zone_t zone, const char *warning)
2840{

--- 27 unchanged lines hidden ---

2868}
2869
2870/* See uma.h */
2871void
2872uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2873{
2874 uma_keg_t keg;
2875
2876 ZONE_LOCK(zone);
2877 keg = zone_first_keg(zone);
2878 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2879 KASSERT(keg->uk_pages == 0,
2880 ("uma_zone_set_init on non-empty keg"));
2881 keg->uk_init = uminit;
2882 ZONE_UNLOCK(zone);
2883}
2884
2885/* See uma.h */
2886void
2887uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2888{
2889 uma_keg_t keg;
2890
2891 ZONE_LOCK(zone);
2892 keg = zone_first_keg(zone);
2893 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2894 KASSERT(keg->uk_pages == 0,
2895 ("uma_zone_set_fini on non-empty keg"));
2896 keg->uk_fini = fini;
2897 ZONE_UNLOCK(zone);
2898}
2899
2900/* See uma.h */
2901void
2902uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2903{
2904 ZONE_LOCK(zone);
2905 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2906 ("uma_zone_set_zinit on non-empty keg"));
2907 zone->uz_init = zinit;
2908 ZONE_UNLOCK(zone);
2909}
2910
2911/* See uma.h */
2912void
2913uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2914{
2915 ZONE_LOCK(zone);
2916 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2917 ("uma_zone_set_zfini on non-empty keg"));
2918 zone->uz_fini = zfini;
2919 ZONE_UNLOCK(zone);
2920}
2921
2922/* See uma.h */
2923/* XXX uk_freef is not actually used with the zone locked */
2924void
2925uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2926{
2927 uma_keg_t keg;
2928
2929 ZONE_LOCK(zone);
2930 keg = zone_first_keg(zone);
2931 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
2932 keg->uk_freef = freef;
2933 ZONE_UNLOCK(zone);
2934}
2935
2936/* See uma.h */
2937/* XXX uk_allocf is not actually used with the zone locked */
2938void
2939uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2940{
2941 uma_keg_t keg;
2942
2943 ZONE_LOCK(zone);
2944 keg = zone_first_keg(zone);
2945 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2946 keg->uk_allocf = allocf;
2947 ZONE_UNLOCK(zone);
2948}
2949
2950/* See uma.h */
2951int
2952uma_zone_reserve_kva(uma_zone_t zone, int count)
2953{
2954 uma_keg_t keg;
2955 vm_offset_t kva;

--- 12 unchanged lines hidden ---

2968#else
2969 if (1) {
2970#endif
2971 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2972 if (kva == 0)
2973 return (0);
2974 } else
2975 kva = 0;
2976 ZONE_LOCK(zone);
2977 keg->uk_kva = kva;
2978 keg->uk_offset = 0;
2979 keg->uk_maxpages = pages;
2980#ifdef UMA_MD_SMALL_ALLOC
2981 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
2982#else
2983 keg->uk_allocf = noobj_alloc;
2984#endif
2985 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2986 ZONE_UNLOCK(zone);
2987 return (1);
2988}
2989
2990/* See uma.h */
2991void
2992uma_prealloc(uma_zone_t zone, int items)
2993{
2994 int slabs;
2995 uma_slab_t slab;
2996 uma_keg_t keg;
2997
2998 keg = zone_first_keg(zone);
2999 if (keg == NULL)
3000 return;
3001 ZONE_LOCK(zone);
3002 slabs = items / keg->uk_ipers;
3003 if (slabs * keg->uk_ipers < items)
3004 slabs++;
3005 while (slabs > 0) {
3006 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3007 if (slab == NULL)
3008 break;
3009 MPASS(slab->us_keg == keg);
3010 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3011 slabs--;
3012 }
3013 ZONE_UNLOCK(zone);
3014}
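
The divide-then-bump that computes the slab count above is a ceiling division; a minimal equivalent, runnable standalone (assuming positive operands):

#include <assert.h>

/* ceil(a / b) for positive ints: the slab count computed above */
static int
ceil_div(int a, int b)
{
	return ((a + b - 1) / b);
}

int
main(void)
{
	assert(ceil_div(25, 10) == 3);	/* 25 items, 10 per slab */
	assert(ceil_div(30, 10) == 3);
	return (0);
}
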
3015
3016/* See uma.h */
3017uint32_t *
3018uma_find_refcnt(uma_zone_t zone, void *item)
3019{
3020 uma_slabrefcnt_t slabref;
3021 uma_slab_t slab;

--- 346 unchanged lines hidden ---