Deleted Added
uma_core.c (r251983, Deleted) vs. uma_core.c (r252040, Added)
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 34 unchanged lines hidden (view full) ---

43
44/*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 251983 2013-06-19 02:30:32Z jeff $");
51__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 252040 2013-06-20 19:08:12Z jeff $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59

--- 181 unchanged lines hidden (view full) ---

241static int hash_expand(struct uma_hash *, struct uma_hash *);
242static void hash_free(struct uma_hash *hash);
243static void uma_timeout(void *);
244static void uma_startup3(void);
245static void *zone_alloc_item(uma_zone_t, void *, int);
246static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
247static void bucket_enable(void);
248static void bucket_init(void);
249static uma_bucket_t bucket_alloc(int, int);
250static void bucket_free(uma_bucket_t);
249static uma_bucket_t bucket_alloc(uma_zone_t zone, int);
250static void bucket_free(uma_zone_t zone, uma_bucket_t);
251static void bucket_zone_drain(void);
252static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, int flags);
253static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
254static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
255static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
256static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
257static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
258 uma_fini fini, int align, uint32_t flags);
259static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
260static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
261static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
262static void zone_release(uma_zone_t zone, void **bucket, int cnt);
263
264void uma_print_zone(uma_zone_t);
265void uma_print_stats(void);
266static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
267static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
268

--- 78 unchanged lines hidden (view full) ---

347 for (; ubz->ubz_entries != 0; ubz++)
348 if (ubz->ubz_maxsize < size)
349 break;
350 ubz--;
351 return (ubz->ubz_entries);
352}
353
354static uma_bucket_t
355bucket_alloc(int entries, int bflags)
353bucket_alloc(uma_zone_t zone, int flags)
356{
357 struct uma_bucket_zone *ubz;
358 uma_bucket_t bucket;
359
360 /*
361 * This is to stop us from allocating per cpu buckets while we're
362 * running out of vm.boot_pages. Otherwise, we would exhaust the
363 * boot pages. This also prevents us from allocating buckets in
364 * low memory situations.
365 */
366 if (bucketdisable)
367 return (NULL);
368
369 ubz = bucket_zone_lookup(entries);
370 bucket = uma_zalloc(ubz->ubz_zone, bflags);
367 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
368 flags |= M_NOVM;
369 ubz = bucket_zone_lookup(zone->uz_count);
370 bucket = uma_zalloc(ubz->ubz_zone, flags);
371 if (bucket) {
372#ifdef INVARIANTS
373 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
374#endif
375 bucket->ub_cnt = 0;
376 bucket->ub_entries = ubz->ubz_entries;
377 }
378
379 return (bucket);
380}
381
382static void
383bucket_free(uma_bucket_t bucket)
383bucket_free(uma_zone_t zone, uma_bucket_t bucket)
384{
385 struct uma_bucket_zone *ubz;
386
387 KASSERT(bucket->ub_cnt == 0,
388 ("bucket_free: Freeing a non free bucket."));
389 ubz = bucket_zone_lookup(bucket->ub_entries);
390 uma_zfree(ubz->ubz_zone, bucket);
391}
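
A note on the two hunks above: bucket_alloc() and bucket_free() now take the zone instead of an entry count or a bare bucket, so the bucket layer can pick the bucket size from uz_count and apply M_NOVM for UMA_ZFLAG_CACHEONLY zones in one place, rather than every caller repeating that logic (compare the bflags blocks deleted later in zone_alloc_bucket() and uma_zfree_arg()). The userland sketch below only models that centralization; the toy_* names and flag values are invented, not UMA's.

#include <stdlib.h>

/* Toy model only; names and flag values are illustrative, not from uma_int.h. */
#define TOY_M_NOVM           0x01
#define TOY_ZFLAG_CACHEONLY  0x02

struct toy_zone {
	int uz_count;                   /* preferred bucket size for this zone */
	int uz_flags;
};

struct toy_bucket {
	int ub_entries;
	int ub_cnt;
};

/* What callers used to do by hand: allocate a bucket of a given size. */
static struct toy_bucket *
toy_bucket_alloc_sized(int entries, int flags)
{
	struct toy_bucket *b;

	(void)flags;                    /* a real allocator would honor TOY_M_NOVM */
	b = malloc(sizeof(*b));
	if (b != NULL) {
		b->ub_entries = entries;
		b->ub_cnt = 0;
	}
	return (b);
}

/* New shape: derive both the size and the extra flags from the zone. */
static struct toy_bucket *
toy_bucket_alloc(struct toy_zone *zone, int flags)
{
	if (zone->uz_flags & TOY_ZFLAG_CACHEONLY)
		flags |= TOY_M_NOVM;    /* callers no longer need this test */
	return (toy_bucket_alloc_sized(zone->uz_count, flags));
}

In this revision bucket_free() only grows the zone parameter; its body in the lines shown above is unchanged, so the argument mainly keeps the two interfaces symmetric.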

--- 265 unchanged lines hidden (view full) ---

657 * it is used elsewhere. Should the tear-down path be made special
658 * there in some form?
659 */
660 CPU_FOREACH(cpu) {
661 cache = &zone->uz_cpu[cpu];
662 bucket_drain(zone, cache->uc_allocbucket);
663 bucket_drain(zone, cache->uc_freebucket);
664 if (cache->uc_allocbucket != NULL)
665 bucket_free(cache->uc_allocbucket);
665 bucket_free(zone, cache->uc_allocbucket);
666 if (cache->uc_freebucket != NULL)
666 if (cache->uc_freebucket != NULL)
667 bucket_free(cache->uc_freebucket);
667 bucket_free(zone, cache->uc_freebucket);
668 cache->uc_allocbucket = cache->uc_freebucket = NULL;
669 }
670 ZONE_LOCK(zone);
671 bucket_cache_drain(zone);
672 ZONE_UNLOCK(zone);
673}
674
675/*

--- 7 unchanged lines hidden (view full) ---

683 /*
684 * Drain the bucket queues and free the buckets, we just keep two per
685 * cpu (alloc/free).
686 */
687 while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
688 LIST_REMOVE(bucket, ub_link);
689 ZONE_UNLOCK(zone);
690 bucket_drain(zone, bucket);
691 bucket_free(bucket);
691 bucket_free(zone, bucket);
692 ZONE_LOCK(zone);
693 }
694}
695
696static void
697keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
698{
699 uint8_t *mem;

--- 96 unchanged lines hidden (view full) ---

796 * is the only call that knows the structure will still be available
797 * when it wakes up.
798 */
799 ZONE_LOCK(zone);
800 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
801 if (waitok == M_NOWAIT)
802 goto out;
803 mtx_unlock(&uma_mtx);
804 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
804 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
805 mtx_lock(&uma_mtx);
806 }
807 zone->uz_flags |= UMA_ZFLAG_DRAINING;
808 bucket_cache_drain(zone);
809 ZONE_UNLOCK(zone);
810 /*
811 * The DRAINING flag protects us from being freed while
812 * we're running. Normally the uma_mtx would protect us but we

--- 557 unchanged lines hidden (view full) ---

1370 if (booted < UMA_STARTUP2)
1371 keg->uk_allocf = startup_alloc;
1372#endif
1373 } else if (booted < UMA_STARTUP2 &&
1374 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1375 keg->uk_allocf = startup_alloc;
1376
1377 /*
1378 * Initialize keg's lock (shared among zones).
1378 * Initialize keg's lock
1379 */
1379 */
1380 if (arg->flags & UMA_ZONE_MTXCLASS)
1381 KEG_LOCK_INIT(keg, 1);
1382 else
1383 KEG_LOCK_INIT(keg, 0);
1380 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1384
1385 /*
1386 * If we're putting the slab header in the actual page we need to
1387 * figure out where in each page it goes. This calculates a right
1388 * justified offset into the memory on an ALIGN_PTR boundary.
1389 */
1390 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1391 u_int totsize;

--- 72 unchanged lines hidden (view full) ---

1464 zone->uz_fails = 0;
1465 zone->uz_sleeps = 0;
1466 zone->uz_count = 0;
1467 zone->uz_flags = 0;
1468 zone->uz_warning = NULL;
1469 timevalclear(&zone->uz_ratecheck);
1470 keg = arg->keg;
1471
1469 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1470
1472 /*
1473 * This is a pure cache zone, no kegs.
1474 */
1475 if (arg->import) {
1475 zone->uz_size = arg->size;
1476 zone->uz_import = arg->import;
1477 zone->uz_release = arg->release;
1478 zone->uz_arg = arg->arg;
1479 zone->uz_count = BUCKET_MAX;
1480 return (0);
1479 zone->uz_lockptr = &zone->uz_lock;
1480 goto out;
1481 }
1482
1483 /*
1484 * Use the regular zone/keg/slab allocator.
1485 */
1486 zone->uz_import = (uma_import)zone_import;
1487 zone->uz_release = (uma_release)zone_release;
1488 zone->uz_arg = zone;
1489
1490 if (arg->flags & UMA_ZONE_SECONDARY) {
1491 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1492 zone->uz_init = arg->uminit;
1493 zone->uz_fini = arg->fini;
1494 zone->uz_lock = &keg->uk_lock;
1494 zone->uz_lockptr = &keg->uk_lock;
1495 zone->uz_flags |= UMA_ZONE_SECONDARY;
1496 mtx_lock(&uma_mtx);
1497 ZONE_LOCK(zone);
1498 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1499 if (LIST_NEXT(z, uz_link) == NULL) {
1500 LIST_INSERT_AFTER(z, zone, uz_link);
1501 break;
1502 }

--- 21 unchanged lines hidden (view full) ---

1524 return (error);
1525 }
1526
1527 /*
1528 * Link in the first keg.
1529 */
1530 zone->uz_klink.kl_keg = keg;
1531 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1532 zone->uz_lock = &keg->uk_lock;
1532 zone->uz_lockptr = &keg->uk_lock;
1533 zone->uz_size = keg->uk_size;
1534 zone->uz_flags |= (keg->uk_flags &
1535 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1536
1537 /*
1538 * Some internal zones don't have room allocated for the per cpu
1539 * caches. If we're internal, bail out here.
1540 */
1541 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1542 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1543 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1544 return (0);
1545 }
1546
1547 if ((keg->uk_flags & UMA_ZONE_MAXBUCKET) == 0)
1548 zone->uz_count = bucket_select(keg->uk_rsize);
1547out:
1548 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1549 zone->uz_count = bucket_select(zone->uz_size);
1549 else
1550 zone->uz_count = BUCKET_MAX;
1551
1552 return (0);
1553}
1554
1555/*
1556 * Keg header dtor. This frees all data, destroys locks, frees the hash

--- 64 unchanged lines hidden (view full) ---

1621 * We only destroy kegs from non secondary zones.
1622 */
1623 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1624 mtx_lock(&uma_mtx);
1625 LIST_REMOVE(keg, uk_link);
1626 mtx_unlock(&uma_mtx);
1627 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1628 }
1630 ZONE_LOCK_FINI(zone);
1629}
1630
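
The uz_lock to uz_lockptr renames running through zone_ctor() and zone_dtor() above are the core of this revision: every zone now carries a lock pointer, set up alongside the zone's own embedded mutex (ZONE_LOCK_INIT() in zone_ctor(), ZONE_LOCK_FINI() in zone_dtor()), and aimed either at that embedded lock for keg-less cache zones (uz_lockptr = &zone->uz_lock) or at the keg's uk_lock for keg-backed and secondary zones (uz_lockptr = &keg->uk_lock). The pthread sketch below is only a userland model of that indirection; the struct layout and the toy_zone_lock()/toy_zone_unlock() helpers are assumptions standing in for the real ZONE_LOCK()/ZONE_UNLOCK() macros, which are not part of this diff.

#include <pthread.h>
#include <stddef.h>

/* Toy stand-ins; fields mirror only what this diff touches. */
struct toy_keg {
	pthread_mutex_t uk_lock;        /* shared by every zone sitting on this keg */
};

struct toy_zone {
	pthread_mutex_t *uz_lockptr;    /* what ZONE_LOCK() would follow */
	pthread_mutex_t  uz_lock;       /* embedded lock, used by keg-less cache zones */
	struct toy_keg  *uz_keg;        /* NULL for a pure cache zone */
};

static void
toy_zone_ctor_locks(struct toy_zone *z, struct toy_keg *k)
{
	/* Every zone gets its own mutex, like ZONE_LOCK_INIT() in zone_ctor(). */
	pthread_mutex_init(&z->uz_lock, NULL);
	z->uz_keg = k;
	if (k != NULL)
		z->uz_lockptr = &k->uk_lock;    /* keg-backed: alias the keg lock */
	else
		z->uz_lockptr = &z->uz_lock;    /* cache zone: use the embedded lock */
}

static void
toy_zone_lock(struct toy_zone *z)       /* models ZONE_LOCK(zone) */
{
	pthread_mutex_lock(z->uz_lockptr);
}

static void
toy_zone_unlock(struct toy_zone *z)     /* models ZONE_UNLOCK(zone) */
{
	pthread_mutex_unlock(z->uz_lockptr);
}

Because the pointer hides which mutex is in play, call sites such as the msleep() on zone->uz_lockptr earlier in this diff work identically for both kinds of zone.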
1631/*
1632 * Traverses every zone in the system and calls a callback
1633 *
1634 * Arguments:
1635 * zfunc A pointer to a function which accepts a zone
1636 * as an argument.

--- 207 unchanged lines hidden (view full) ---

1844 args.keg = keg;
1845
1846 /* XXX Attaches only one keg of potentially many. */
1847 return (zone_alloc_item(zones, &args, M_WAITOK));
1848}
1849
1850/* See uma.h */
1851uma_zone_t
1852uma_zcache_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit,
1853 uma_fini zfini, uma_import zimport, uma_release zrelease,
1854 void *arg, int flags)
1854uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1855 uma_init zinit, uma_fini zfini, uma_import zimport,
1856 uma_release zrelease, void *arg, int flags)
1855{
1856 struct uma_zctor_args args;
1857
1858 memset(&args, 0, sizeof(args));
1859 args.name = name;
1860 args.size = 0;
1862 args.size = size;
1861 args.ctor = ctor;
1862 args.dtor = dtor;
1863 args.uminit = zinit;
1864 args.fini = zfini;
1865 args.import = zimport;
1866 args.release = zrelease;
1867 args.arg = arg;
1868 args.align = 0;
1869 args.flags = flags;
1870
1871 return (zone_alloc_item(zones, &args, M_WAITOK));
1872}
1873
1874static void
1875zone_lock_pair(uma_zone_t a, uma_zone_t b)
1876{
1877 if (a < b) {
1878 ZONE_LOCK(a);
1879 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1881 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1880 } else {
1881 ZONE_LOCK(b);
1882 } else {
1883 ZONE_LOCK(b);
1882 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1884 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1883 }
1884}
1885
1886static void
1887zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1888{
1889
1890 ZONE_UNLOCK(a);

--- 175 unchanged lines hidden (view full) ---

2066
2067 /*
2068 * Discard any empty allocation bucket while we hold no locks.
2069 */
2070 bucket = cache->uc_allocbucket;
2071 cache->uc_allocbucket = NULL;
2072 critical_exit();
2073 if (bucket != NULL)
2074 bucket_free(bucket);
2076 bucket_free(zone, bucket);
2075
2076 /* Short-circuit for zones without buckets and low memory. */
2077 if (zone->uz_count == 0 || bucketdisable)
2078 goto zalloc_item;
2079
2080 /*
2081 * Attempt to retrieve the item from the per-CPU cache has failed, so
2082 * we must go back to the zone. This requires the zone lock, so we

--- 153 unchanged lines hidden (view full) ---

2236 * could have while we were unlocked. Check again before we
2237 * fail.
2238 */
2239 flags |= M_NOVM;
2240 }
2241 return (slab);
2242}
2243
2244static inline void
2245zone_relock(uma_zone_t zone, uma_keg_t keg)
2246{
2247 if (zone->uz_lock != &keg->uk_lock) {
2248 KEG_UNLOCK(keg);
2249 ZONE_LOCK(zone);
2250 }
2251}
2252
2253static inline void
2254keg_relock(uma_keg_t keg, uma_zone_t zone)
2255{
2256 if (zone->uz_lock != &keg->uk_lock) {
2257 ZONE_UNLOCK(zone);
2258 KEG_LOCK(keg);
2259 }
2260}
2261
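
The two inline helpers deleted above were the old zone/keg lock hand-off. With uz_lockptr in place, the slab-fetch functions below take KEG_LOCK()/KEG_UNLOCK() directly and, per the revised comment on zone_fetch_slab_multi(), return with the keg locked on success and with no lock held on NULL, while ZONE_LOCK() is taken only around zone-level state such as uz_sleeps and UMA_ZFLAG_FULL. The sketch below is a userland approximation of that protocol with invented toy_* types, not the kernel functions themselves.

#include <pthread.h>

struct toy_keg {
	pthread_mutex_t uk_lock;
	int             uk_free_slabs;
};

struct toy_zone {
	pthread_mutex_t uz_lock;
	int             uz_sleeps;
};

/*
 * Mirrors the new convention: returns 1 with the keg lock still held
 * (the caller later drops it, as zone_import() does with KEG_UNLOCK()),
 * or 0 with no lock held after recording the failure in zone state.
 */
static int
toy_fetch_slab(struct toy_zone *z, struct toy_keg *k)
{
	pthread_mutex_lock(&k->uk_lock);        /* KEG_LOCK(keg) */
	if (k->uk_free_slabs > 0) {
		k->uk_free_slabs--;
		return (1);                     /* keg stays locked for the caller */
	}
	pthread_mutex_unlock(&k->uk_lock);      /* KEG_UNLOCK(keg) */

	pthread_mutex_lock(&z->uz_lock);        /* ZONE_LOCK(zone), zone state only */
	z->uz_sleeps++;
	pthread_mutex_unlock(&z->uz_lock);      /* ZONE_UNLOCK(zone) */
	return (0);
}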
2262static uma_slab_t
2263zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2264{
2265 uma_slab_t slab;
2266
2267 if (keg == NULL)
2251 if (keg == NULL) {
2268 keg = zone_first_keg(zone);
2252 keg = zone_first_keg(zone);
2253 KEG_LOCK(keg);
2254 }
2269
2270 for (;;) {
2271 slab = keg_fetch_slab(keg, zone, flags);
2272 if (slab)
2273 return (slab);
2274 if (flags & (M_NOWAIT | M_NOVM))
2275 break;
2276 }
2263 KEG_UNLOCK(keg);
2277 return (NULL);
2278}
2279
2280/*
2281 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2282 * with the keg locked. Caller must call zone_relock() afterwards if the
2283 * zone lock is required. On NULL the zone lock is held.
2269 * with the keg locked. On NULL no lock is held.
2284 *
2285 * The last pointer is used to seed the search. It is not required.
2286 */
2287static uma_slab_t
2288zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2289{
2290 uma_klink_t klink;
2291 uma_slab_t slab;

--- 7 unchanged lines hidden (view full) ---

2299 * as well. We don't want to block if we can find a provider
2300 * without blocking.
2301 */
2302 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2303 /*
2304 * Use the last slab allocated as a hint for where to start
2305 * the search.
2306 */
2307 if (last) {
2293 if (last != NULL) {
2308 slab = keg_fetch_slab(last, zone, flags);
2309 if (slab)
2310 return (slab);
2311 zone_relock(zone, last);
2312 last = NULL;
2297 KEG_UNLOCK(last);
2313 }
2314 /*
2315 * Loop until we have a slab incase of transient failures
2316 * while M_WAITOK is specified. I'm not sure this is 100%
2317 * required but we've done it for so long now.
2318 */
2319 for (;;) {
2320 empty = 0;
2321 full = 0;
2322 /*
2323 * Search the available kegs for slabs. Be careful to hold the
2324 * correct lock while calling into the keg layer.
2325 */
2326 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2327 keg = klink->kl_keg;
2328 keg_relock(keg, zone);
2313 KEG_LOCK(keg);
2329 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2330 slab = keg_fetch_slab(keg, zone, flags);
2331 if (slab)
2332 return (slab);
2333 }
2334 if (keg->uk_flags & UMA_ZFLAG_FULL)
2335 full++;
2336 else
2337 empty++;
2338 zone_relock(zone, keg);
2323 KEG_UNLOCK(keg);
2339 }
2340 if (rflags & (M_NOWAIT | M_NOVM))
2341 break;
2342 flags = rflags;
2343 /*
2344 * All kegs are full. XXX We can't atomically check all kegs
2345 * and sleep so just sleep for a short period and retry.
2346 */
2347 if (full && !empty) {
2333 ZONE_LOCK(zone);
2348 zone->uz_flags |= UMA_ZFLAG_FULL;
2349 zone->uz_sleeps++;
2350 zone_log_warning(zone);
2351 msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
2337 msleep(zone, zone->uz_lockptr, PVM,
2338 "zonelimit", hz/100);
2352 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2339 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2340 ZONE_UNLOCK(zone);
2353 continue;
2354 }
2355 }
2356 return (NULL);
2357}
2358
2359static void *
2360slab_alloc_item(uma_keg_t keg, uma_slab_t slab)

--- 21 unchanged lines hidden (view full) ---

2382
2383static int
2384zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2385{
2386 uma_slab_t slab;
2387 uma_keg_t keg;
2388 int i;
2389
2390 ZONE_LOCK(zone);
2391 /* Try to keep the buckets totally full */
2392 slab = NULL;
2393 keg = NULL;
2378 slab = NULL;
2379 keg = NULL;
2380 /* Try to keep the buckets totally full */
2394 for (i = 0; i < max; ) {
2395 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2396 break;
2397 keg = slab->us_keg;
2398 while (slab->us_freecount && i < max)
2385 while (slab->us_freecount && i < max)
2399 bucket[i++] = slab_alloc_item(keg, slab);
2400
2401 /* Don't block on the next fill */
2402 flags &= ~M_WAITOK;
2403 flags |= M_NOWAIT;
2404 }
2405 if (slab != NULL)
2406 KEG_UNLOCK(keg);
2407 else
2408 ZONE_UNLOCK(zone);
2409
2410 return i;
2411}
2412
2413static uma_bucket_t
2414zone_alloc_bucket(uma_zone_t zone, int flags)
2415{
2416 uma_bucket_t bucket;
2417 int bflags;
2418 int max;
2419
2402 int max;
2403
2420 max = zone->uz_count;
2421 bflags = (flags & ~M_WAITOK) | M_NOWAIT;
2422 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2423 bflags |= M_NOVM;
2424 bucket = bucket_alloc(zone->uz_count, bflags);
2404 bucket = bucket_alloc(zone, M_NOWAIT | (flags & M_NOVM));
2425 if (bucket == NULL)
2426 goto out;
2427
2428 max = MIN(bucket->ub_entries, max);
2408 max = MIN(bucket->ub_entries, zone->uz_count);
2429 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2430 max, flags);
2431
2432 /*
2433 * Initialize the memory if necessary.
2434 */
2435 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2436 int i;
2437
2438 for (i = 0; i < bucket->ub_cnt; i++)
2439 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2440 flags) != 0)
2441 break;
2442 /*
2443 * If we couldn't initialize the whole bucket, put the
2444 * rest back onto the freelist.
2445 */
2446 if (i != bucket->ub_cnt) {
2447 zone->uz_release(zone->uz_arg, bucket->ub_bucket[i],
2427 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2448 bucket->ub_cnt - i);
2449#ifdef INVARIANTS
2450 bzero(&bucket->ub_bucket[i],
2451 sizeof(void *) * (bucket->ub_cnt - i));
2452#endif
2453 bucket->ub_cnt = i;
2454 }
2455 }
2456
2457out:
2458 if (bucket == NULL || bucket->ub_cnt == 0) {
2459 if (bucket != NULL)
2460 bucket_free(bucket);
2440 bucket_free(zone, bucket);
2461 atomic_add_long(&zone->uz_fails, 1);
2462 return (NULL);
2463 }
2464
2465 return (bucket);
2466}
2467
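
One easy-to-miss fix in zone_alloc_bucket() above: when per-item initialization fails partway, the leftover items are now handed back through &bucket->ub_bucket[i] rather than bucket->ub_bucket[i], which matches the zone_release() prototype earlier in this diff (a void ** array plus a count). A userland sketch of that import/init/release round trip follows; the toy_* types and callback shapes are modeled on those prototypes, not taken from uma.h.

#include <stddef.h>

/* Callback shapes modeled on zone_import()/zone_release() in this diff. */
typedef int  (*toy_import_fn)(void *arg, void **store, int max);
typedef void (*toy_release_fn)(void *arg, void **store, int cnt);

struct toy_bucket {
	int   ub_cnt;
	void *ub_bucket[16];
};

/* Fill a bucket, then hand back any items whose per-item init failed. */
static void
toy_fill_bucket(struct toy_bucket *b, toy_import_fn import,
    toy_release_fn release, void *arg, int (*init)(void *item))
{
	int i;

	b->ub_cnt = import(arg, b->ub_bucket, 16);
	for (i = 0; i < b->ub_cnt; i++)
		if (init(b->ub_bucket[i]) != 0)
			break;
	if (i != b->ub_cnt) {
		/* Pass the address of the first failed slot, not its value. */
		release(arg, &b->ub_bucket[i], b->ub_cnt - i);
		b->ub_cnt = i;
	}
}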
2468/*

--- 55 unchanged lines hidden (view full) ---

2524}
2525
2526/* See uma.h */
2527void
2528uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2529{
2530 uma_cache_t cache;
2531 uma_bucket_t bucket;
2532 int bflags;
2533 int cpu;
2534
2535#ifdef UMA_DEBUG_ALLOC_1
2536 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2537#endif
2538 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2539 zone->uz_name);
2540

--- 108 unchanged lines hidden (view full) ---

2649 critical_exit();
2650
2651 /* And the zone.. */
2652 ZONE_UNLOCK(zone);
2653
2654#ifdef UMA_DEBUG_ALLOC
2655 printf("uma_zfree: Allocating new free bucket.\n");
2656#endif
2657 bflags = M_NOWAIT;
2658 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2659 bflags |= M_NOVM;
2660 bucket = bucket_alloc(zone->uz_count, bflags);
2636 bucket = bucket_alloc(zone, M_NOWAIT);
2661 if (bucket) {
2662 critical_enter();
2663 cpu = curcpu;
2664 cache = &zone->uz_cpu[cpu];
2665 if (cache->uc_freebucket == NULL) {
2666 cache->uc_freebucket = bucket;
2667 goto zfree_start;
2668 }
2669 /*
2670 * We lost the race, start over. We have to drop our
2671 * critical section to free the bucket.
2672 */
2673 critical_exit();
2674 bucket_free(bucket);
2650 bucket_free(zone, bucket);
2675 goto zfree_restart;
2676 }
2677
2678 /*
2679 * If nothing else caught this, we'll just do an internal free.
2680 */
2681zfree_item:
2682 zone_free_item(zone, item, udata, SKIP_DTOR);

--- 33 unchanged lines hidden (view full) ---

2716 void *item;
2717 uma_slab_t slab;
2718 uma_keg_t keg;
2719 uint8_t *mem;
2720 int clearfull;
2721 int i;
2722
2723 clearfull = 0;
2724 ZONE_LOCK(zone);
2725 keg = zone_first_keg(zone);
2700 keg = zone_first_keg(zone);
2701 KEG_LOCK(keg);
2726 for (i = 0; i < cnt; i++) {
2727 item = bucket[i];
2728 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2729 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2730 if (zone->uz_flags & UMA_ZONE_HASH) {
2731 slab = hash_sfind(&keg->uk_hash, mem);
2732 } else {
2733 mem += keg->uk_pgoff;

--- 19 unchanged lines hidden (view full) ---

2753 * clearing ZFLAG_FULL, wake up all procs blocked
2754 * on pages. This should be uncommon, so keeping this
2755 * simple for now (rather than adding count of blocked
2756 * threads etc).
2757 */
2758 wakeup(keg);
2759 }
2760 }
2761 zone_relock(zone, keg);
2737 KEG_UNLOCK(keg);
2762 if (clearfull) {
2738 if (clearfull) {
2739 ZONE_LOCK(zone);
2763 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2764 wakeup(zone);
2740 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2741 wakeup(zone);
2742 ZONE_UNLOCK(zone);
2765 }
2743 }
2766 ZONE_UNLOCK(zone);
2767
2768}
2769
2770/*
2771 * Frees a single item to any zone.
2772 *
2773 * Arguments:
2774 * zone The zone to free to

--- 27 unchanged lines hidden (view full) ---

2802int
2803uma_zone_set_max(uma_zone_t zone, int nitems)
2804{
2805 uma_keg_t keg;
2806
2807 keg = zone_first_keg(zone);
2808 if (keg == NULL)
2809 return (0);
2810 ZONE_LOCK(zone);
2787 KEG_LOCK(keg);
2811 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2812 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2813 keg->uk_maxpages += keg->uk_ppera;
2814 nitems = keg->uk_maxpages * keg->uk_ipers;
2815 ZONE_UNLOCK(zone);
2792 KEG_UNLOCK(keg);
2816
2817 return (nitems);
2818}
2819
2820/* See uma.h */
2821int
2822uma_zone_get_max(uma_zone_t zone)
2823{
2824 int nitems;
2825 uma_keg_t keg;
2826
2827 keg = zone_first_keg(zone);
2828 if (keg == NULL)
2829 return (0);
2830 ZONE_LOCK(zone);
2807 KEG_LOCK(keg);
2831 nitems = keg->uk_maxpages * keg->uk_ipers;
2808 nitems = keg->uk_maxpages * keg->uk_ipers;
2832 ZONE_UNLOCK(zone);
2809 KEG_UNLOCK(keg);
2833
2834 return (nitems);
2835}
2836
2837/* See uma.h */
2838void
2839uma_zone_set_warning(uma_zone_t zone, const char *warning)
2840{

--- 27 unchanged lines hidden (view full) ---

2868}
2869
2870/* See uma.h */
2871void
2872uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2873{
2874 uma_keg_t keg;
2875
2876 ZONE_LOCK(zone);
2877 keg = zone_first_keg(zone);
2878 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2853 keg = zone_first_keg(zone);
2854 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2855 KEG_LOCK(keg);
2879 KASSERT(keg->uk_pages == 0,
2880 ("uma_zone_set_init on non-empty keg"));
2881 keg->uk_init = uminit;
2882 ZONE_UNLOCK(zone);
2859 KEG_UNLOCK(keg);
2883}
2884
2885/* See uma.h */
2886void
2887uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2888{
2889 uma_keg_t keg;
2890
2891 ZONE_LOCK(zone);
2892 keg = zone_first_keg(zone);
2893 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2868 keg = zone_first_keg(zone);
2869 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2870 KEG_LOCK(keg);
2894 KASSERT(keg->uk_pages == 0,
2895 ("uma_zone_set_fini on non-empty keg"));
2896 keg->uk_fini = fini;
2897 ZONE_UNLOCK(zone);
2874 KEG_UNLOCK(keg);
2898}
2899
2900/* See uma.h */
2901void
2902uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2903{
2881
2904 ZONE_LOCK(zone);
2905 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2906 ("uma_zone_set_zinit on non-empty keg"));
2907 zone->uz_init = zinit;
2908 ZONE_UNLOCK(zone);
2909}
2910
2911/* See uma.h */
2912void
2913uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2914{
2893
2915 ZONE_LOCK(zone);
2916 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2917 ("uma_zone_set_zfini on non-empty keg"));
2918 zone->uz_fini = zfini;
2919 ZONE_UNLOCK(zone);
2920}
2921
2922/* See uma.h */
2923/* XXX uk_freef is not actually used with the zone locked */
2924void
2925uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2926{
2927 uma_keg_t keg;
2928
2929 ZONE_LOCK(zone);
2930 keg = zone_first_keg(zone);
2931 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2908 keg = zone_first_keg(zone);
2909 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2910 KEG_LOCK(keg);
2932 keg->uk_freef = freef;
2911 keg->uk_freef = freef;
2933 ZONE_UNLOCK(zone);
2912 KEG_UNLOCK(keg);
2934}
2935
2936/* See uma.h */
2937/* XXX uk_allocf is not actually used with the zone locked */
2938void
2939uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2940{
2941 uma_keg_t keg;
2942
2943 ZONE_LOCK(zone);
2944 keg = zone_first_keg(zone);
2922 keg = zone_first_keg(zone);
2923 KEG_LOCK(keg);
2945 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2946 keg->uk_allocf = allocf;
2924 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2925 keg->uk_allocf = allocf;
2947 ZONE_UNLOCK(zone);
2926 KEG_UNLOCK(keg);
2948}
2949
2950/* See uma.h */
2951int
2952uma_zone_reserve_kva(uma_zone_t zone, int count)
2953{
2954 uma_keg_t keg;
2955 vm_offset_t kva;

--- 12 unchanged lines hidden (view full) ---

2968#else
2969 if (1) {
2970#endif
2971 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2972 if (kva == 0)
2973 return (0);
2974 } else
2975 kva = 0;
2976 ZONE_LOCK(zone);
2955 KEG_LOCK(keg);
2977 keg->uk_kva = kva;
2978 keg->uk_offset = 0;
2979 keg->uk_maxpages = pages;
2980#ifdef UMA_MD_SMALL_ALLOC
2981 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
2982#else
2983 keg->uk_allocf = noobj_alloc;
2984#endif
2985 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2986 ZONE_UNLOCK(zone);
2965 KEG_UNLOCK(keg);
2966
2987 return (1);
2988}
2989
2990/* See uma.h */
2991void
2992uma_prealloc(uma_zone_t zone, int items)
2993{
2994 int slabs;
2995 uma_slab_t slab;
2996 uma_keg_t keg;
2997
2998 keg = zone_first_keg(zone);
2999 if (keg == NULL)
3000 return;
3001 ZONE_LOCK(zone);
2981 KEG_LOCK(keg);
3002 slabs = items / keg->uk_ipers;
3003 if (slabs * keg->uk_ipers < items)
3004 slabs++;
3005 while (slabs > 0) {
3006 slab = keg_alloc_slab(keg, zone, M_WAITOK);
3007 if (slab == NULL)
3008 break;
3009 MPASS(slab->us_keg == keg);
3010 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3011 slabs--;
3012 }
3013 ZONE_UNLOCK(zone);
2993 KEG_UNLOCK(keg);
3014}
3015
3016/* See uma.h */
3017uint32_t *
3018uma_find_refcnt(uma_zone_t zone, void *item)
3019{
3020 uma_slabrefcnt_t slabref;
3021 uma_slab_t slab;

--- 346 unchanged lines hidden ---