2c2
< * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
---
> * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
51c51
< __FBSDID("$FreeBSD: head/sys/vm/uma_core.c 182047 2008-08-23 12:40:07Z antoine $");
---
> __FBSDID("$FreeBSD: head/sys/vm/uma_core.c 187681 2009-01-25 09:11:24Z jeff $");
115c115
< static int uma_align_cache = 16 - 1;
---
> static int uma_align_cache = 64 - 1;
215c215
< static uma_slab_t slab_zalloc(uma_zone_t, int);
---
> static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
224,225c224,225
< static void zone_small_init(uma_zone_t zone);
< static void zone_large_init(uma_zone_t zone);
---
> static void keg_small_init(uma_keg_t keg);
> static void keg_large_init(uma_keg_t keg);
233,234c233,234
< static void *uma_zalloc_internal(uma_zone_t, void *, int);
< static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip,
---
> static void *zone_alloc_item(uma_zone_t, void *, int);
> static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
241,244c241,245
< static int uma_zalloc_bucket(uma_zone_t zone, int flags);
< static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
< static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
< static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
---
> static int zone_alloc_bucket(uma_zone_t zone, int flags);
> static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
> static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
> static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
> static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
245a247,248
> static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
> static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
294c297,298
< NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
---
> NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
> UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
329c333
< bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
---
> bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
347c351
< uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
---
> zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
359a364,366
> static inline uma_keg_t
> zone_first_keg(uma_zone_t zone)
> {
360a368,379
> 	return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
> }
>
> static void
> zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
> {
> 	uma_klink_t klink;
>
> 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
> 		kegfn(klink->kl_keg);
> }
>
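The two helpers added above are the heart of the new zone/keg split: a zone now reaches its keg(s) through a list of klink structures instead of a single uz_keg pointer. Below is a minimal userspace sketch of that iteration pattern, using the same <sys/queue.h> LIST macros UMA uses; the types here are simplified stand-ins, not the real uma_zone/uma_keg layouts.

#include <sys/queue.h>
#include <stdio.h>

struct keg {
	const char *k_name;
};

struct klink {
	struct keg *kl_keg;
	LIST_ENTRY(klink) kl_link;
};

struct zone {
	LIST_HEAD(, klink) z_kegs;
};

static void
keg_print(struct keg *k)
{
	printf("keg %s\n", k->k_name);
}

/* Apply kegfn to every keg attached to the zone, first keg first. */
static void
zone_foreach_keg(struct zone *z, void (*kegfn)(struct keg *))
{
	struct klink *kl;

	LIST_FOREACH(kl, &z->z_kegs, kl_link)
		kegfn(kl->kl_keg);
}

int
main(void)
{
	struct keg k0 = { "primary" }, k1 = { "backup" };
	struct klink l0 = { &k0 }, l1 = { &k1 };
	struct zone z;

	LIST_INIT(&z.z_kegs);
	LIST_INSERT_HEAD(&z.z_kegs, &l1, kl_link);
	LIST_INSERT_HEAD(&z.z_kegs, &l0, kl_link);
	zone_foreach_keg(&z, keg_print);
	return (0);
}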
385,389c404
< * Arguments:
< * zone The zone to operate on
< *
< * Returns:
< * Nothing
---
> * Returns nothing.
392c407
< zone_timeout(uma_zone_t zone)
---
> keg_timeout(uma_keg_t keg)
394,395d408
< uma_keg_t keg;
< u_int64_t alloc;
397,399c410
< keg = zone->uz_keg;
< alloc = 0;
<
---
> KEG_LOCK(keg);
401c412
< * Expand the zone hash table.
---
> * Expand the keg hash table.
407d417
< ZONE_LOCK(zone);
416c426
< * while the zone lock is held will lead to deadlock.
---
> * while the keg lock is held will lead to deadlock.
421c431
< ZONE_UNLOCK(zone);
---
> KEG_UNLOCK(keg);
423c433
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
431c441
< ZONE_UNLOCK(zone);
---
> KEG_UNLOCK(keg);
433c443
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
436c446
< ZONE_UNLOCK(zone);
---
> KEG_UNLOCK(keg);
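These hunks re-home the hash-expansion timeout onto the keg while keeping the existing rule that hash_alloc() must run with the lock dropped, since it may sleep. A minimal sketch of that unlock-allocate-relock-retest pattern, with a pthread mutex standing in for the keg lock and calloc() for hash_alloc(); all names and types here are illustrative, not UMA's.

#include <pthread.h>
#include <stdlib.h>

struct table {
	pthread_mutex_t t_lock;
	size_t t_size;
	void **t_slots;
};

static int
table_grow(struct table *t)
{
	void **slots;
	size_t nsize;

	pthread_mutex_lock(&t->t_lock);
	nsize = t->t_size * 2;
	/* The allocation may sleep; never perform it with the lock held. */
	pthread_mutex_unlock(&t->t_lock);

	slots = calloc(nsize, sizeof(*slots));
	if (slots == NULL)
		return (-1);

	pthread_mutex_lock(&t->t_lock);
	if (nsize <= t->t_size) {
		/* Lost the race: someone else grew it while we slept. */
		pthread_mutex_unlock(&t->t_lock);
		free(slots);
		return (0);
	}
	/* ... rehash t->t_slots into slots here ... */
	free(t->t_slots);
	t->t_slots = slots;
	t->t_size = nsize;
	pthread_mutex_unlock(&t->t_lock);
	return (0);
}

int
main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, 16, NULL };

	t.t_slots = calloc(t.t_size, sizeof(*t.t_slots));
	return (table_grow(&t) == 0 ? 0 : 1);
}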
438a449,455
> static void
> zone_timeout(uma_zone_t zone)
> {
>
> 	zone_foreach_keg(zone, &keg_timeout);
> }
>
465c482
< hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
---
> hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
538c555
< uma_zfree_internal(hashzone,
---
> zone_free_item(hashzone,
558,559d574
< uma_slab_t slab;
< int mzone;
565,571d579
< slab = NULL;
< mzone = 0;
<
< /* We have to lookup the slab again for malloc.. */
< if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
< mzone = 1;
<
580,588c588
< /*
< * This is extremely inefficient. The slab pointer was passed
< * to uma_zfree_arg, but we lost it because the buckets don't
< * hold them. This will go away when free() gets a size passed
< * to it.
< */
< if (mzone)
< slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
< uma_zfree_internal(zone, item, slab, SKIP_DTOR, 0);
---
> zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
668c668
< * Frees pages from a zone back to the system. This is done on demand from
---
> * Frees pages from a keg back to the system. This is done on demand from
671,676c671
< * Arguments:
< * zone The zone to free pages from
< * all Should we drain all items?
< *
< * Returns:
< * Nothing.
---
> * Returns nothing.
678,679c673,674
< void
< zone_drain(uma_zone_t zone)
---
> static void
> keg_drain(uma_keg_t keg)
682d676
< uma_keg_t keg;
689,690d682
< keg = zone->uz_keg;
<
692c684
< * We don't want to take pages from statically allocated zones at this
---
> * We don't want to take pages from statically allocated kegs at this
698,699d689
< ZONE_LOCK(zone);
<
701c691
< printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
---
> printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
703c693
< bucket_cache_drain(zone);
---
> KEG_LOCK(keg);
729c719
< ZONE_UNLOCK(zone);
---
> KEG_UNLOCK(keg);
741,742c731
< if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
< (keg->uk_flags & UMA_ZONE_REFCNT)) {
---
> if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
756c745
< uma_zfree_internal(keg->uk_slabzone, slab, NULL,
---
> zone_free_item(keg->uk_slabzone, slab, NULL,
760c749
< zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
---
> keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
765a755,795
> static void
> zone_drain_wait(uma_zone_t zone, int waitok)
> {
>
> 	/*
> 	 * Set draining to interlock with zone_dtor() so we can release our
> 	 * locks as we go. Only dtor() should do a WAITOK call since it
> 	 * is the only call that knows the structure will still be available
> 	 * when it wakes up.
> 	 */
> 	ZONE_LOCK(zone);
> 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
> 		if (waitok == M_NOWAIT)
> 			goto out;
> 		mtx_unlock(&uma_mtx);
> 		msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
> 		mtx_lock(&uma_mtx);
> 	}
> 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
> 	bucket_cache_drain(zone);
> 	ZONE_UNLOCK(zone);
> 	/*
> 	 * The DRAINING flag protects us from being freed while
> 	 * we're running. Normally the uma_mtx would protect us but we
> 	 * must be able to release and acquire the right lock for each keg.
> 	 */
> 	zone_foreach_keg(zone, &keg_drain);
> 	ZONE_LOCK(zone);
> 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
> 	wakeup(zone);
> out:
> 	ZONE_UNLOCK(zone);
> }
>
> void
> zone_drain(uma_zone_t zone)
> {
>
> 	zone_drain_wait(zone, M_NOWAIT);
> }
>
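zone_drain_wait() introduces a DRAINING flag so a zone can be drained with its locks dropped while zone_dtor() interlocks against it. The compilable userspace sketch below shows the same flag-plus-sleep interlock, with a pthread condvar standing in for the kernel's msleep()/wakeup(); the structure and helper names are simplified stand-ins, not the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct zone {
	pthread_mutex_t z_lock;
	pthread_cond_t z_cv;
	bool z_draining;
};

static void drain_buckets(struct zone *z) { (void)z; /* per-CPU caches */ }
static void drain_kegs(struct zone *z) { (void)z; /* backing slabs */ }

/* Returns false when nowait is set and another thread already drains. */
static bool
zone_drain_wait(struct zone *z, bool nowait)
{
	pthread_mutex_lock(&z->z_lock);
	while (z->z_draining) {
		if (nowait) {
			pthread_mutex_unlock(&z->z_lock);
			return (false);
		}
		pthread_cond_wait(&z->z_cv, &z->z_lock);
	}
	z->z_draining = true;			/* we own the drain now */
	drain_buckets(z);
	pthread_mutex_unlock(&z->z_lock);

	drain_kegs(z);				/* runs with no locks held */

	pthread_mutex_lock(&z->z_lock);
	z->z_draining = false;
	pthread_cond_broadcast(&z->z_cv);	/* the wakeup(zone) analogue */
	pthread_mutex_unlock(&z->z_lock);
	return (true);
}

int
main(void)
{
	struct zone z = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
	    false };

	printf("drained: %d\n", zone_drain_wait(&z, true));
	return (0);
}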
767c797
< * Allocate a new slab for a zone. This does not insert the slab onto a list.
---
> * Allocate a new slab for a keg. This does not insert the slab onto a list.
770d799
< * zone The zone to allocate slabs for
778c807
< slab_zalloc(uma_zone_t zone, int wait)
---
> keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
780a810
> uma_alloc allocf;
782d811
< uma_keg_t keg;
786a816
> mtx_assert(&keg->uk_lock, MA_OWNED);
788d817
< keg = zone->uz_keg;
791c820
< printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
---
> printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
793c822,823
< ZONE_UNLOCK(zone);
---
> allocf = keg->uk_allocf;
> KEG_UNLOCK(keg);
796c826
< slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
---
> slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
798c828
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
815,816c845,846
< mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
< &flags, wait);
---
> /* zone is passed for legacy reasons. */
> mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
819c849
< uma_zfree_internal(keg->uk_slabzone, slab, NULL,
---
> zone_free_item(keg->uk_slabzone, slab, NULL,
821c851
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
829,830c859
< if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
< (keg->uk_flags & UMA_ZONE_REFCNT))
---
> if (keg->uk_flags & UMA_ZONE_VTOSLAB)
863,864c892
< if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
< (keg->uk_flags & UMA_ZONE_REFCNT)) {
---
> if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
878c906
< uma_zfree_internal(keg->uk_slabzone, slab,
---
> zone_free_item(keg->uk_slabzone, slab,
882c910
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
886c914
< ZONE_LOCK(zone);
---
> KEG_LOCK(keg);
908c936
< keg = zone->uz_keg;
---
> keg = zone_first_keg(zone);
938d965
< * zone Unused
961d987
< * zone Unused
975a1002
> uma_keg_t keg;
977c1004,1005
< object = zone->uz_keg->uk_obj;
---
> keg = zone_first_keg(zone);
> object = keg->uk_obj;
987c1015
< zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
---
> zkva = keg->uk_kva + pages * PAGE_SIZE;
1055c1083
< * Finish creating a small uma zone. This calculates ipers, and the zone size.
---
> * Finish creating a small uma keg. This calculates ipers, and the keg size.
1058c1086
< * zone The zone we should initialize
---
> * keg The zone we should initialize
1064c1092
< zone_small_init(uma_zone_t zone)
---
> keg_small_init(uma_keg_t keg)
1066d1093
< uma_keg_t keg;
1072,1073c1099
< keg = zone->uz_keg;
< KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
---
> KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
1093c1119
< KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
---
> KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
1112c1138
< ("zone_small_init: keg->uk_ipers too high!"));
---
> ("keg_small_init: keg->uk_ipers too high!"));
1115c1141
< "zone: %s, calculated wastedspace = %d, "
---
> "keg: %s, calculated wastedspace = %d, "
1118c1144
< "new wasted space = %d\n", zone->uz_name, wastedspace,
---
> "new wasted space = %d\n", keg->uk_name, wastedspace,
1123c1149
< if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
---
> if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1129c1155
< * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do
---
> * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1134c1160
< * zone The zone we should initialize
---
> * keg The keg we should initialize
1140c1166
< zone_large_init(uma_zone_t zone)
---
> keg_large_init(uma_keg_t keg)
1142d1167
< uma_keg_t keg;
1145,1147c1170
< keg = zone->uz_keg;
<
< KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
---
> KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1149c1172
< ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
---
> ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1161c1184
< if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
---
> if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1166a1190,1221
> static void
> keg_cachespread_init(uma_keg_t keg)
> {
> 	int alignsize;
> 	int trailer;
> 	int pages;
> 	int rsize;
>
> 	alignsize = keg->uk_align + 1;
> 	rsize = keg->uk_size;
> 	/*
> 	 * We want one item to start on every align boundary in a page. To
> 	 * do this we will span pages. We will also extend the item by the
> 	 * size of align if it is an even multiple of align. Otherwise, it
> 	 * would fall on the same boundary every time.
> 	 */
> 	if (rsize & keg->uk_align)
> 		rsize = (rsize & ~keg->uk_align) + alignsize;
> 	if ((rsize & alignsize) == 0)
> 		rsize += alignsize;
> 	trailer = rsize - keg->uk_size;
> 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
> 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
> 	keg->uk_rsize = rsize;
> 	keg->uk_ppera = pages;
> 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
> 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
> 	KASSERT(keg->uk_ipers <= uma_max_ipers,
> 	    ("keg_cachespread_init: keg->uk_ipers too high(%d) increase max_ipers",
> 	    keg->uk_ipers));
> }
>
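keg_cachespread_init() sizes slabs so that consecutive items start on different cache-line boundaries. Its arithmetic can be replayed standalone; the sketch below does so for an assumed 4 KB page, 64-byte cache line, and a sample 128-byte item (the skew case, where the rounded size is an even multiple of the alignment).

#include <stdio.h>

#define PAGE_SIZE	4096
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int align = 64 - 1;	/* cache-line mask, like uk_align */
	int size = 128;		/* sample item size, like uk_size */
	int alignsize, rsize, trailer, pages, ipers;

	alignsize = align + 1;
	rsize = size;
	/* Round the item up to the next alignment boundary... */
	if (rsize & align)
		rsize = (rsize & ~align) + alignsize;
	/* ...and skew by one boundary when it is an even multiple of the
	 * alignment, so successive items start on different cache lines. */
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	/* For 128-byte items this prints: rsize 192 pages 3 ipers 64. */
	printf("rsize %d pages %d ipers %d\n", rsize, pages, ipers);
	return (0);
}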
1198c1253
< zone->uz_keg = keg;
---
> keg->uk_name = zone->uz_name;
1205a1261,1263
> if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
> keg->uk_flags |= UMA_ZONE_VTOSLAB;
>
1208c1266
< * linkage that is added to the size in zone_small_init(). If
---
> * linkage that is added to the size in keg_small_init(). If
1210c1268
< * zone_small_init() with a calculated 'ipers' of 0.
---
> * keg_small_init() with a calculated 'ipers' of 0.
1213c1271,1273
< if ((keg->uk_size+UMA_FRITMREF_SZ) >
---
> if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
> keg_cachespread_init(keg);
> else if ((keg->uk_size+UMA_FRITMREF_SZ) >
1215c1275
< zone_large_init(zone);
---
> keg_large_init(keg);
1217c1277
< zone_small_init(zone);
---
> keg_small_init(keg);
1219c1279,1281
< if ((keg->uk_size+UMA_FRITM_SZ) >
---
> if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
> keg_cachespread_init(keg);
> else if ((keg->uk_size+UMA_FRITM_SZ) >
1221c1283
< zone_large_init(zone);
---
> keg_large_init(keg);
1223c1285
< zone_small_init(zone);
---
> keg_small_init(keg);
1247,1248c1309
< * Initialize keg's lock (shared among zones) through
< * Master zone
---
> * Initialize keg's lock (shared among zones).
1250d1310
< zone->uz_lock = &keg->uk_lock;
1252c1312
< ZONE_LOCK_INIT(zone, 1);
---
> KEG_LOCK_INIT(keg, 1);
1254c1314
< ZONE_LOCK_INIT(zone, 0);
---
> KEG_LOCK_INIT(keg, 0);
1303,1306c1363,1366
< printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
< zone->uz_name, zone,
< keg->uk_size, keg->uk_ipers,
< keg->uk_ppera, keg->uk_pgoff);
---
> printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
> zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
> keg->uk_ipers, keg->uk_ppera,
> (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1323d1382
<
1335a1395
> zone->uz_slab = zone_fetch_slab;
1341a1402,1403
> zone->uz_flags = 0;
> keg = arg->keg;
1345,1346d1406
< keg = arg->keg;
< zone->uz_keg = keg;
1349a1410
> zone->uz_flags |= UMA_ZONE_SECONDARY;
1352d1412
< keg->uk_flags |= UMA_ZONE_SECONDARY;
1361,1363c1421,1423
< } else if (arg->keg == NULL) {
< if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
< arg->align, arg->flags) == NULL)
---
> } else if (keg == NULL) {
> if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
> arg->align, arg->flags)) == NULL)
1381c1441,1445
< keg = zone->uz_keg;
---
> /*
> * Link in the first keg.
> */
> zone->uz_klink.kl_keg = keg;
> LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1382a1447,1449
> zone->uz_size = keg->uk_size;
> zone->uz_flags |= (keg->uk_flags &
> (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1389c1456
< KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
---
> KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1416c1483
< mtx_lock(&keg->uk_lock);
---
> KEG_LOCK(keg);
1422c1489
< mtx_unlock(&keg->uk_lock);
---
> KEG_UNLOCK(keg);
1424,1425c1491
< if (keg->uk_flags & UMA_ZONE_HASH)
< hash_free(&keg->uk_hash);
---
> hash_free(&keg->uk_hash);
1427c1493
< mtx_destroy(&keg->uk_lock);
---
> KEG_LOCK_FINI(keg);
1438a1505
> uma_klink_t klink;
1443c1510
< keg = zone->uz_keg;
---
> keg = zone_first_keg(zone);
1445c1512
< if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
---
> if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1449,1463c1516,1539
< zone_drain(zone);
< if (keg->uk_flags & UMA_ZONE_SECONDARY) {
< LIST_REMOVE(zone, uz_link);
< /*
< * XXX there are some races here where
< * the zone can be drained but zone lock
< * released and then refilled before we
< * remove it... we dont care for now
< */
< ZONE_LOCK(zone);
< if (LIST_EMPTY(&keg->uk_zones))
< keg->uk_flags &= ~UMA_ZONE_SECONDARY;
< ZONE_UNLOCK(zone);
< mtx_unlock(&uma_mtx);
< } else {
---
> 	LIST_REMOVE(zone, uz_link);
> 	mtx_unlock(&uma_mtx);
> 	/*
> 	 * XXX there are some races here where
> 	 * the zone can be drained but zone lock
> 	 * released and then refilled before we
> 	 * remove it... we don't care for now
> 	 */
> 	zone_drain_wait(zone, M_WAITOK);
> 	/*
> 	 * Unlink all of our kegs.
> 	 */
> 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
> 		klink->kl_keg = NULL;
> 		LIST_REMOVE(klink, kl_link);
> 		if (klink == &zone->uz_klink)
> 			continue;
> 		free(klink, M_TEMP);
> 	}
> 	/*
> 	 * We only destroy kegs from non secondary zones.
> 	 */
> 	if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
> 		mtx_lock(&uma_mtx);
1465d1540
< LIST_REMOVE(zone, uz_link);
1467c1542
< uma_zfree_internal(kegs, keg, NULL, SKIP_NONE,
---
> zone_free_item(kegs, keg, NULL, SKIP_NONE,
1470d1544
< zone->uz_keg = NULL;
1520c1594
< * which the calculated wastage in zone_small_init() will be
---
> * which the calculated wastage in keg_small_init() will be
1528c1602
< * the ipers initially calculated in zone_small_init(), we use
---
> * the ipers initially calculated in keg_small_init(), we use
1560c1634
< uma_max_ipers = UMA_SLAB_SIZE / objsize;
---
> uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
1573c1647
< uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
---
> uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
1701c1775
< static uma_zone_t
---
> static uma_keg_t
1713c1787
< return (uma_zalloc_internal(kegs, &args, M_WAITOK));
---
> return (zone_alloc_item(kegs, &args, M_WAITOK));
1744c1818
< return (uma_zalloc_internal(zones, &args, M_WAITOK));
---
> return (zone_alloc_item(zones, &args, M_WAITOK));
1752a1827
> uma_keg_t keg;
1753a1829
> keg = zone_first_keg(master);
1755c1831
< args.size = master->uz_keg->uk_size;
---
> args.size = keg->uk_size;
1760,1762c1836,1838
< args.align = master->uz_keg->uk_align;
< args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
< args.keg = master->uz_keg;
---
> args.align = keg->uk_align;
> args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
> args.keg = keg;
1764c1840,1841
< return (uma_zalloc_internal(zones, &args, M_WAITOK));
---
> /* XXX Attaches only one keg of potentially many. */
> return (zone_alloc_item(zones, &args, M_WAITOK));
1766a1844,1929
> static void
> zone_lock_pair(uma_zone_t a, uma_zone_t b)
> {
> 	if (a < b) {
> 		ZONE_LOCK(a);
> 		mtx_lock_flags(b->uz_lock, MTX_DUPOK);
> 	} else {
> 		ZONE_LOCK(b);
> 		mtx_lock_flags(a->uz_lock, MTX_DUPOK);
> 	}
> }
>
> static void
> zone_unlock_pair(uma_zone_t a, uma_zone_t b)
> {
>
> 	ZONE_UNLOCK(a);
> 	ZONE_UNLOCK(b);
> }
>
> int
> uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
> {
> 	uma_klink_t klink;
> 	uma_klink_t kl;
> 	int error;
>
> 	error = 0;
> 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
>
> 	zone_lock_pair(zone, master);
> 	/*
> 	 * zone must use vtoslab() to resolve objects and must already be
> 	 * a secondary.
> 	 */
> 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
> 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
> 		error = EINVAL;
> 		goto out;
> 	}
> 	/*
> 	 * The new master must also use vtoslab().
> 	 */
> 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
> 		error = EINVAL;
> 		goto out;
> 	}
> 	/*
> 	 * Both must either be refcnt, or not be refcnt.
> 	 */
> 	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
> 	    (master->uz_flags & UMA_ZONE_REFCNT)) {
> 		error = EINVAL;
> 		goto out;
> 	}
> 	/*
> 	 * The underlying object must be the same size. rsize
> 	 * may be different.
> 	 */
> 	if (master->uz_size != zone->uz_size) {
> 		error = E2BIG;
> 		goto out;
> 	}
> 	/*
> 	 * Put it at the end of the list.
> 	 */
> 	klink->kl_keg = zone_first_keg(master);
> 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
> 		if (LIST_NEXT(kl, kl_link) == NULL) {
> 			LIST_INSERT_AFTER(kl, klink, kl_link);
> 			break;
> 		}
> 	}
> 	klink = NULL;
> 	zone->uz_flags |= UMA_ZFLAG_MULTI;
> 	zone->uz_slab = zone_fetch_slab_multi;
>
> out:
> 	zone_unlock_pair(zone, master);
> 	if (klink != NULL)
> 		free(klink, M_TEMP);
>
> 	return (error);
> }
>
>
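uma_zsecond_add() must hold two zone locks at once, so zone_lock_pair() orders acquisitions by address; in the kernel the second acquisition also passes MTX_DUPOK because both locks belong to the same lock class. A userspace sketch of the address-ordering idea with pthread mutexes; names are illustrative, and the kernel's comparison of unrelated pointers is carried over as-is.

#include <pthread.h>
#include <stdio.h>

struct zone {
	pthread_mutex_t uz_lock;
};

/* Always lock the lower-addressed zone first: any two threads locking
 * the same pair then agree on the order, so they cannot deadlock. */
static void
zone_lock_pair(struct zone *a, struct zone *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->uz_lock);
		pthread_mutex_lock(&b->uz_lock);
	} else {
		pthread_mutex_lock(&b->uz_lock);
		pthread_mutex_lock(&a->uz_lock);
	}
}

static void
zone_unlock_pair(struct zone *a, struct zone *b)
{
	pthread_mutex_unlock(&a->uz_lock);
	pthread_mutex_unlock(&b->uz_lock);
}

int
main(void)
{
	struct zone x = { PTHREAD_MUTEX_INITIALIZER };
	struct zone y = { PTHREAD_MUTEX_INITIALIZER };

	/* Both argument orders acquire in the same global order. */
	zone_lock_pair(&x, &y);
	zone_unlock_pair(&x, &y);
	zone_lock_pair(&y, &x);
	zone_unlock_pair(&y, &x);
	printf("no deadlock\n");
	return (0);
}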
1772c1935
< uma_zfree_internal(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
---
> zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1832c1995
< if (zone->uz_ctor(item, zone->uz_keg->uk_size,
---
> if (zone->uz_ctor(item, zone->uz_size,
1834c1997
< uma_zfree_internal(zone, item, udata,
---
> zone_free_item(zone, item, udata,
1841c2004
< bzero(item, zone->uz_keg->uk_size);
---
> bzero(item, zone->uz_size);
1924c2087
< if (uma_zalloc_bucket(zone, flags)) {
---
> if (zone_alloc_bucket(zone, flags)) {
1936c2099,2100
< return (uma_zalloc_internal(zone, udata, flags));
---
> item = zone_alloc_item(zone, udata, flags);
> return (item);
1940c2104
< uma_zone_slab(uma_zone_t zone, int flags)
---
> keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
1943d2106
< uma_keg_t keg;
1945,1968c2108
< keg = zone->uz_keg;
<
< /*
< * This is to prevent us from recursively trying to allocate
< * buckets. The problem is that if an allocation forces us to
< * grab a new bucket we will call page_alloc, which will go off
< * and cause the vm to allocate vm_map_entries. If we need new
< * buckets there too we will recurse in kmem_alloc and bad
< * things happen. So instead we return a NULL bucket, and make
< * the code that allocates buckets smart enough to deal with it
< *
< * XXX: While we want this protection for the bucket zones so that
< * recursion from the VM is handled (and the calling code that
< * allocates buckets knows how to deal with it), we do not want
< * to prevent allocation from the slab header zones (slabzone
< * and slabrefzone) if uk_recurse is not zero for them. The
< * reason is that it could lead to NULL being returned for
< * slab header allocations even in the M_WAITOK case, and the
< * caller can't handle that.
< */
< if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
< if (zone != slabzone && zone != slabrefzone && zone != zones)
< return (NULL);
<
---
> mtx_assert(&keg->uk_lock, MA_OWNED);
1985a2126
> MPASS(slab->us_keg == keg);
1995,1996c2136
< if (keg->uk_maxpages &&
< keg->uk_pages >= keg->uk_maxpages) {
---
> if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
1998c2138,2143
<
---
> /*
> * If this is not a multi-zone, set the FULL bit.
> * Otherwise zone_fetch_slab_multi() takes care of it.
> */
> if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
> zone->uz_flags |= UMA_ZFLAG_FULL;
2001,2003c2146
< else
< msleep(keg, &keg->uk_lock, PVM,
< "zonelimit", 0);
---
> msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2007c2150
< slab = slab_zalloc(zone, flags);
---
> slab = keg_alloc_slab(keg, zone, flags);
2009d2151
<
2015a2158
> MPASS(slab->us_keg == keg);
2024,2025c2167
< if (flags & M_NOWAIT)
< flags |= M_NOVM;
---
> flags |= M_NOVM;
2029a2172,2295
> static inline void
> zone_relock(uma_zone_t zone, uma_keg_t keg)
> {
> 	if (zone->uz_lock != &keg->uk_lock) {
> 		KEG_UNLOCK(keg);
> 		ZONE_LOCK(zone);
> 	}
> }
>
> static inline void
> keg_relock(uma_keg_t keg, uma_zone_t zone)
> {
> 	if (zone->uz_lock != &keg->uk_lock) {
> 		ZONE_UNLOCK(zone);
> 		KEG_LOCK(keg);
> 	}
> }
>
> static uma_slab_t
> zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
> {
> 	uma_slab_t slab;
>
> 	if (keg == NULL)
> 		keg = zone_first_keg(zone);
> 	/*
> 	 * This is to prevent us from recursively trying to allocate
> 	 * buckets. The problem is that if an allocation forces us to
> 	 * grab a new bucket we will call page_alloc, which will go off
> 	 * and cause the vm to allocate vm_map_entries. If we need new
> 	 * buckets there too we will recurse in kmem_alloc and bad
> 	 * things happen. So instead we return a NULL bucket, and make
> 	 * the code that allocates buckets smart enough to deal with it
> 	 */
> 	if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
> 		return (NULL);
>
> 	for (;;) {
> 		slab = keg_fetch_slab(keg, zone, flags);
> 		if (slab)
> 			return (slab);
> 		if (flags & (M_NOWAIT | M_NOVM))
> 			break;
> 	}
> 	return (NULL);
> }
>
> /*
>  * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
>  * with the keg locked. Caller must call zone_relock() afterwards if the
>  * zone lock is required. On NULL the zone lock is held.
>  *
>  * The last pointer is used to seed the search. It is not required.
>  */
> static uma_slab_t
> zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
> {
> 	uma_klink_t klink;
> 	uma_slab_t slab;
> 	uma_keg_t keg;
> 	int flags;
> 	int empty;
> 	int full;
>
> 	/*
> 	 * Don't wait on the first pass. This will skip limit tests
> 	 * as well. We don't want to block if we can find a provider
> 	 * without blocking.
> 	 */
> 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
> 	/*
> 	 * Use the last slab allocated as a hint for where to start
> 	 * the search.
> 	 */
> 	if (last) {
> 		slab = keg_fetch_slab(last, zone, flags);
> 		if (slab)
> 			return (slab);
> 		zone_relock(zone, last);
> 		last = NULL;
> 	}
> 	/*
> 	 * Loop until we have a slab in case of transient failures
> 	 * while M_WAITOK is specified. I'm not sure this is 100%
> 	 * required but we've done it for so long now.
> 	 */
> 	for (;;) {
> 		empty = 0;
> 		full = 0;
> 		/*
> 		 * Search the available kegs for slabs. Be careful to hold the
> 		 * correct lock while calling into the keg layer.
> 		 */
> 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
> 			keg = klink->kl_keg;
> 			keg_relock(keg, zone);
> 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
> 				slab = keg_fetch_slab(keg, zone, flags);
> 				if (slab)
> 					return (slab);
> 			}
> 			if (keg->uk_flags & UMA_ZFLAG_FULL)
> 				full++;
> 			else
> 				empty++;
> 			zone_relock(zone, keg);
> 		}
> 		if (rflags & (M_NOWAIT | M_NOVM))
> 			break;
> 		flags = rflags;
> 		/*
> 		 * All kegs are full. XXX We can't atomically check all kegs
> 		 * and sleep so just sleep for a short period and retry.
> 		 */
> 		if (full && !empty) {
> 			zone->uz_flags |= UMA_ZFLAG_FULL;
> 			msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
> 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
> 			continue;
> 		}
> 	}
> 	return (NULL);
> }
>
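zone_fetch_slab_multi() strips M_WAITOK on its first pass over the kegs so it never sleeps while another keg might still have free slabs, then retries with the caller's real flags. The compilable sketch below shows that two-phase search in isolation; try_fetch() is a hypothetical stand-in for keg_fetch_slab(), and the locking and FULL accounting are omitted.

#include <stdio.h>
#include <stddef.h>

#define M_NOWAIT	0x0001
#define M_WAITOK	0x0002

struct keg {
	int k_fails;		/* transient failures left (stand-in) */
};

/* Hypothetical stand-in for keg_fetch_slab(): NULL means "no slab". */
static void *
try_fetch(struct keg *k, int flags)
{
	static char slab;	/* dummy slab object */

	(void)flags;
	if (k->k_fails-- > 0)
		return (NULL);
	return (&slab);
}

/* Pass 1 is opportunistic and never sleeps; later passes use the
 * caller's real flags, mirroring zone_fetch_slab_multi(). */
static void *
fetch_multi(struct keg **kegs, size_t nkegs, int rflags)
{
	void *slab;
	size_t i;
	int flags;

	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
	for (;;) {
		for (i = 0; i < nkegs; i++)
			if ((slab = try_fetch(kegs[i], flags)) != NULL)
				return (slab);
		if (rflags & M_NOWAIT)	/* caller can't sleep: give up */
			return (NULL);
		flags = rflags;		/* pass 2+: allow blocking */
	}
}

int
main(void)
{
	struct keg a = { 3 }, b = { 1 };
	struct keg *kegs[] = { &a, &b };

	printf("%s\n", fetch_multi(kegs, 2, M_WAITOK) ? "got slab" : "NULL");
	return (0);
}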
2031c2297
< uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
---
> slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
2038c2304,2305
< keg = zone->uz_keg;
---
> keg = slab->us_keg;
> mtx_assert(&keg->uk_lock, MA_OWNED);
2064c2331
< uma_zalloc_bucket(uma_zone_t zone, int flags)
---
> zone_alloc_bucket(uma_zone_t zone, int flags)
2067a2335
> uma_keg_t keg;
2076c2344
< ("uma_zalloc_bucket: Bucket on free list is not empty."));
---
> ("zone_alloc_bucket: Bucket on free list is not empty."));
2082c2350
< if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
---
> if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2090c2358
< if (bucket == NULL)
---
> if (bucket == NULL) {
2091a2360
> }
2107a2377,2378
> slab = NULL;
> keg = NULL;
2109c2380,2381
< (slab = uma_zone_slab(zone, flags)) != NULL) {
---
> (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
> keg = slab->us_keg;
2112c2384
< uma_slab_alloc(zone, slab);
---
> slab_alloc_item(zone, slab);
2117a2390,2391
> if (slab)
> zone_relock(zone, keg);
2131,2132c2405,2406
< if (zone->uz_init(bucket->ub_bucket[i],
< zone->uz_keg->uk_size, origflags) != 0)
---
> if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
> origflags) != 0)
2142c2416
< uma_zfree_internal(zone, bucket->ub_bucket[j],
---
> zone_free_item(zone, bucket->ub_bucket[j],
2180c2454
< uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
---
> zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2182d2455
< uma_keg_t keg;
2187d2459
< keg = zone->uz_keg;
2194c2466
< slab = uma_zone_slab(zone, flags);
---
> slab = zone->uz_slab(zone, NULL, flags);
2201c2473
< item = uma_slab_alloc(zone, slab);
---
> item = slab_alloc_item(zone, slab);
2202a2475
> zone_relock(zone, slab->us_keg);
2204d2476
<
2214,2215c2486,2487
< if (zone->uz_init(item, keg->uk_size, flags) != 0) {
< uma_zfree_internal(zone, item, udata, SKIP_FINI,
---
> if (zone->uz_init(item, zone->uz_size, flags) != 0) {
> zone_free_item(zone, item, udata, SKIP_FINI,
2221,2222c2493,2494
< if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
< uma_zfree_internal(zone, item, udata, SKIP_DTOR,
---
> if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
> zone_free_item(zone, item, udata, SKIP_DTOR,
2228c2500
< bzero(item, keg->uk_size);
---
> bzero(item, zone->uz_size);
2237d2508
< uma_keg_t keg;
2243,2244d2513
< keg = zone->uz_keg;
<
2252c2521,2522
< zone->uz_dtor(item, keg->uk_size, udata);
---
> zone->uz_dtor(item, zone->uz_size, udata);
>
2255c2525
< if (keg->uk_flags & UMA_ZONE_MALLOC)
---
> if (zone->uz_flags & UMA_ZONE_MALLOC)
2265c2535
< if (keg->uk_flags & UMA_ZFLAG_FULL)
---
> if (zone->uz_flags & UMA_ZFLAG_FULL)
2387c2657
< if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
---
> if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2402c2672
< uma_zfree_internal(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
---
> zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2417c2687
< uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
---
> zone_free_item(uma_zone_t zone, void *item, void *udata,
2424a2695
> int clearfull;
2426,2427d2696
< keg = zone->uz_keg;
<
2429c2698,2699
< zone->uz_dtor(item, keg->uk_size, udata);
---
> zone->uz_dtor(item, zone->uz_size, udata);
>
2431c2701
< zone->uz_fini(item, keg->uk_size);
---
> zone->uz_fini(item, zone->uz_size);
2440c2710
< if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
---
> if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2442c2712,2713
< if (keg->uk_flags & UMA_ZONE_HASH)
---
> keg = zone_first_keg(zone); /* Must only be one. */
> if (zone->uz_flags & UMA_ZONE_HASH) {
2444c2715
< else {
---
> } else {
2449c2720,2726
< slab = (uma_slab_t)udata;
---
> /* This prevents redundant lookups via free(). */
> if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
> slab = (uma_slab_t)udata;
> else
> slab = vtoslab((vm_offset_t)item);
> keg = slab->us_keg;
> keg_relock(keg, zone);
2450a2728
> MPASS(keg == slab->us_keg);
2481a2760
> clearfull = 0;
2483c2762
< if (keg->uk_pages < keg->uk_maxpages)
---
> if (keg->uk_pages < keg->uk_maxpages) {
2484a2764,2765
> clearfull = 1;
> }
2494,2495c2775,2781
<
< ZONE_UNLOCK(zone);
---
> if (clearfull) {
> zone_relock(zone, keg);
> zone->uz_flags &= ~UMA_ZFLAG_FULL;
> wakeup(zone);
> ZONE_UNLOCK(zone);
> } else
> KEG_UNLOCK(keg);
2504d2789
< keg = zone->uz_keg;
2506,2510c2791,2792
< if (keg->uk_ppera > 1)
< keg->uk_maxpages = nitems * keg->uk_ppera;
< else
< keg->uk_maxpages = nitems / keg->uk_ipers;
<
---
> keg = zone_first_keg(zone);
> keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2512c2794
< keg->uk_maxpages++;
---
> keg->uk_maxpages += keg->uk_ppera;
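uma_zone_set_max() now computes the page limit per slab (uk_ppera pages holding uk_ipers items) instead of per page. A worked example with assumed sample numbers; the round-up guard shown here mirrors the unchanged condition line that sits between these two hunks as context and is not reproduced in the diff.

#include <stdio.h>

int
main(void)
{
	int ipers = 64;		/* items per slab (uk_ipers) */
	int ppera = 1;		/* pages per slab (uk_ppera) */
	int nitems = 1000;	/* requested item limit */
	int maxpages;

	maxpages = (nitems / ipers) * ppera;	/* 15 whole slabs */
	if ((maxpages / ppera) * ipers < nitems)
		maxpages += ppera;		/* round up by one slab */
	/* Prints: maxpages 16, effective limit 1024 items. */
	printf("maxpages %d, effective limit %d items\n",
	    maxpages, (maxpages / ppera) * ipers);
	return (0);
}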
2520a2803,2804
> uma_keg_t keg;
>
2522c2806,2807
< KASSERT(zone->uz_keg->uk_pages == 0,
---
> keg = zone_first_keg(zone);
> KASSERT(keg->uk_pages == 0,
2524c2809
< zone->uz_keg->uk_init = uminit;
---
> keg->uk_init = uminit;
2531a2817,2818
> uma_keg_t keg;
>
2533c2820,2821
< KASSERT(zone->uz_keg->uk_pages == 0,
---
> keg = zone_first_keg(zone);
> KASSERT(keg->uk_pages == 0,
2535c2823
< zone->uz_keg->uk_fini = fini;
---
> keg->uk_fini = fini;
2544c2832
< KASSERT(zone->uz_keg->uk_pages == 0,
---
> KASSERT(zone_first_keg(zone)->uk_pages == 0,
2555c2843
< KASSERT(zone->uz_keg->uk_pages == 0,
---
> KASSERT(zone_first_keg(zone)->uk_pages == 0,
2565a2854
>
2567c2856
< zone->uz_keg->uk_freef = freef;
---
> zone_first_keg(zone)->uk_freef = freef;
2575a2865,2866
> uma_keg_t keg;
>
2577,2578c2868,2870
< zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
< zone->uz_keg->uk_allocf = allocf;
---
> keg = zone_first_keg(zone);
> keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
> keg->uk_allocf = allocf;
2590c2882
< keg = zone->uz_keg;
---
> keg = zone_first_keg(zone);
2626c2918
< keg = zone->uz_keg;
---
> keg = zone_first_keg(zone);
2632c2924,2927
< slab = slab_zalloc(zone, M_WAITOK);
---
> slab = keg_alloc_slab(keg, zone, M_WAITOK);
> if (slab == NULL)
> break;
> MPASS(slab->us_keg == keg);
2648d2942
< keg = zone->uz_keg;
2650a2945
> keg = slabref->us_keg;
2685c2980
< full = (zone->uz_keg->uk_flags & UMA_ZFLAG_FULL);
---
> full = (zone->uz_flags & UMA_ZFLAG_FULL);
2693c2988
< return (zone->uz_keg->uk_flags & UMA_ZFLAG_FULL);
---
> return (zone->uz_flags & UMA_ZFLAG_FULL);
2703c2998
< slab = uma_zalloc_internal(slabzone, NULL, wait);
---
> slab = zone_alloc_item(slabzone, NULL, wait);
2713c3008
< uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE,
---
> zone_free_item(slabzone, slab, NULL, SKIP_NONE,
2725c3020
< uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
---
> zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
2752,2753c3047,3048
< void
< uma_print_zone(uma_zone_t zone)
---
> static void
> uma_print_keg(uma_keg_t keg)
2755,2756d3049
< uma_cache_t cache;
< uma_keg_t keg;
2758d3050
< int i;
2760,2762c3052,3054
< keg = zone->uz_keg;
< printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
< zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
---
> printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
> "out %d free %d limit %d\n",
> keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2764c3056,3057
< (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
---
> (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
> (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
2773a3067,3079
> }
>
> void
> uma_print_zone(uma_zone_t zone)
> {
> 	uma_cache_t cache;
> 	uma_klink_t kl;
> 	int i;
>
> 	printf("zone: %s(%p) size %d flags %d\n",
> 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
> 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
> 		uma_print_keg(kl->kl_keg);
2852a3159
> uma_klink_t kl;
2854a3162
> uma_keg_t k;
2905,2906d3212
< uth.uth_pages = kz->uk_pages;
< uth.uth_keg_free = kz->uk_free;
2909,2915c3215,3222
< uth.uth_maxpages = kz->uk_maxpages;
< if (kz->uk_ppera > 1)
< uth.uth_limit = kz->uk_maxpages /
< kz->uk_ppera;
< else
< uth.uth_limit = kz->uk_maxpages *
< kz->uk_ipers;
---
> 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
> 				k = kl->kl_keg;
> 				uth.uth_maxpages += k->uk_maxpages;
> 				uth.uth_pages += k->uk_pages;
> 				uth.uth_keg_free += k->uk_free;
> 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
> 				    * k->uk_ipers;
> 			}
2921c3228
< if ((kz->uk_flags & UMA_ZONE_SECONDARY) &&
---
> if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
2998c3305
< if (!((kz->uk_flags & UMA_ZONE_SECONDARY) &&
---
> if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&