23c23
< * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
---
> * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
46a47
> #include <sys/cityhash.h>
2338c2339,2340
< VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
---
> VERIFY(refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
> pio));
2349c2351
< pio, flags));
---
> pio->io_allocator, pio, flags));
2354c2356
< &pio->io_alloc_list, pio);
---
> &pio->io_alloc_list, pio, pio->io_allocator);
2368c2370
< gbh_copies - copies, pio);
---
> gbh_copies - copies, pio->io_allocator, pio);
2426c2428
< zp.zp_copies, cio, flags));
---
> zp.zp_copies, cio->io_allocator, cio, flags));
2916c2918
< zio_io_to_allocate(spa_t *spa)
---
> zio_io_to_allocate(spa_t *spa, int allocator)
2920c2922
< ASSERT(MUTEX_HELD(&spa->spa_alloc_lock));
---
> ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator]));
2922c2924
< zio = avl_first(&spa->spa_alloc_tree);
---
> zio = avl_first(&spa->spa_alloc_trees[allocator]);
2931a2934
> ASSERT3U(zio->io_allocator, ==, allocator);
2933c2936
< zio->io_prop.zp_copies, zio, 0)) {
---
> zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) {
2937c2940
< avl_remove(&spa->spa_alloc_tree, zio);
---
> avl_remove(&spa->spa_alloc_trees[allocator], zio);
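The hunks above convert the single spa_alloc_lock/spa_alloc_tree pair into per-allocator arrays, so each allocator dequeues from its own throttle queue under its own lock. A minimal userland sketch of that sharded-queue layout (names and the linked list are hypothetical stand-ins; the kernel uses a kmutex_t and an AVL tree per allocator):

#include <pthread.h>

#define	ALLOC_COUNT	4	/* hypothetical spa_alloc_count */

struct io {
	struct io	*io_next;
	int		io_allocator;
};

/* One lock/queue pair per allocator, mirroring spa_alloc_locks[]/_trees[]. */
typedef struct alloc_shard {
	pthread_mutex_t	as_lock;
	struct io	*as_head;	/* stand-in for the per-allocator AVL tree */
} alloc_shard_t;

static alloc_shard_t shards[ALLOC_COUNT];

static void
shards_init(void)
{
	for (int i = 0; i < ALLOC_COUNT; i++)
		pthread_mutex_init(&shards[i].as_lock, NULL);
}

/*
 * Queueing an I/O touches only its own allocator's lock, so independent
 * streams can be throttled concurrently without contending on one mutex.
 */
static void
shard_enqueue(struct io *zio)
{
	alloc_shard_t *s = &shards[zio->io_allocator];

	pthread_mutex_lock(&s->as_lock);
	zio->io_next = s->as_head;
	s->as_head = zio;
	pthread_mutex_unlock(&s->as_lock);
}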
2961c2964,2974
< mutex_enter(&spa->spa_alloc_lock);
---
> zbookmark_phys_t *bm = &zio->io_bookmark;
> /*
> * We want to try to use as many allocators as possible to help improve
> * performance, but we also want logically adjacent IOs to be physically
> * adjacent to improve sequential read performance. We chunk each object
> * into 2^20 block regions, and then hash based on the objset, object,
> * level, and region to accomplish both of these goals.
> */
> zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object,
> bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
> mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]);
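The comment above describes the hashing scheme: objset, object, level, and the 2^20-block region number are hashed together, so logically adjacent blocks land on the same allocator while unrelated streams spread across allocators. A standalone sketch of the bucketing, with a stand-in mixer in place of the real cityhash4() and a hypothetical allocator count of 4:

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in mixer for illustration only; the kernel uses cityhash4()
 * from the newly included sys/cityhash.h.
 */
static uint64_t
mix_hash4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
	uint64_t h = a * 0x9e3779b97f4a7c15ULL;

	h ^= b + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	h ^= d + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	return (h);
}

int
main(void)
{
	uint64_t objset = 54, object = 7, level = 0;
	uint64_t alloc_count = 4;	/* hypothetical spa_alloc_count */

	/*
	 * blkid >> 20 groups each object into 2^20-block regions:
	 * blkids 0..1048575 share one allocator, 1048576.. the next.
	 */
	for (uint64_t blkid = 0; blkid < (3ULL << 20); blkid += (1ULL << 19))
		printf("blkid %llu -> allocator %llu\n",
		    (unsigned long long)blkid,
		    (unsigned long long)(mix_hash4(objset, object, level,
		    blkid >> 20) % alloc_count));
	return (0);
}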
2964c2977
< avl_add(&spa->spa_alloc_tree, zio);
---
> avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio);
2966,2967c2979,2980
< nio = zio_io_to_allocate(zio->io_spa);
< mutex_exit(&spa->spa_alloc_lock);
---
> nio = zio_io_to_allocate(zio->io_spa, zio->io_allocator);
> mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]);
2988c3001
< zio_allocate_dispatch(spa_t *spa)
---
> zio_allocate_dispatch(spa_t *spa, int allocator)
2992,2994c3005,3007
< mutex_enter(&spa->spa_alloc_lock);
< zio = zio_io_to_allocate(spa);
< mutex_exit(&spa->spa_alloc_lock);
---
> mutex_enter(&spa->spa_alloc_locks[allocator]);
> zio = zio_io_to_allocate(spa, allocator);
> mutex_exit(&spa->spa_alloc_locks[allocator]);
3035c3048
< &zio->io_alloc_list, zio);
---
> &zio->io_alloc_list, zio, zio->io_allocator);
3095,3096c3108,3109
< zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
< uint64_t size, boolean_t *slog)
---
> zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp,
> blkptr_t *old_bp, uint64_t size, boolean_t *slog)
3103a3117,3122
> /*
> * When allocating a zil block, we don't have information about
> * the final destination of the block except the objset it's part
> * of, so we just hash the objset ID to pick the allocator to get
> * some parallelism.
> */
3105c3124,3125
< txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL);
---
> txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL,
> cityhash4(0, 0, 0, objset) % spa->spa_alloc_count);
3111c3131,3132
< &io_alloc_list, NULL);
---
> &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) %
> spa->spa_alloc_count);
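Both zio_alloc_zil() call sites pick the allocator the same way, so the selection reads as a small helper. The helper below is hypothetical (the patch inlines the expression), and cityhash4() is stubbed so the sketch stands alone; the point is that with only the objset ID available, every ZIL block of a given objset hashes to the same allocator:

#include <stdint.h>

/* Minimal stand-ins so the sketch is self-contained. */
typedef struct spa { int spa_alloc_count; } spa_t;

static uint64_t
cityhash4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)	/* stub */
{
	return ((a ^ b ^ c ^ d) * 0x9e3779b97f4a7c15ULL + (d >> 29));
}

/*
 * Hypothetical helper mirroring the two call sites above: a ZIL block
 * carries no bookmark yet, so only the objset ID feeds the hash.
 */
static int
zil_pick_allocator(spa_t *spa, uint64_t objset)
{
	return (cityhash4(0, 0, 0, objset) % spa->spa_alloc_count);
}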
3681,3682c3702,3703
< zio->io_prop.zp_copies, zio);
< zio_allocate_dispatch(zio->io_spa);
---
> zio->io_prop.zp_copies, zio->io_allocator, zio);
> zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
3765c3786,3787
< metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags);
---
> metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
> pio->io_allocator, B_TRUE);
3769c3791
< 1, pio);
---
> 1, pio->io_allocator, pio);
3776c3798
< zio_allocate_dispatch(zio->io_spa);
---
> zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
3819,3820c3841,3844
< metaslab_group_alloc_verify(spa, zio->io_bp, zio);
< VERIFY(refcount_not_held(&mc->mc_alloc_slots, zio));
---
> metaslab_group_alloc_verify(spa, zio->io_bp, zio,
> zio->io_allocator);
> VERIFY(refcount_not_held(&mc->mc_alloc_slots[zio->io_allocator],
> zio));
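The verification hunks above check that a zio's reserved slots are released against the same per-allocator count they were charged to, which is why io_allocator is recorded on the zio. A minimal sketch of that invariant with plain counters in place of the kernel's refcount_t (names hypothetical):

#include <assert.h>
#include <stdint.h>

#define	ALLOC_COUNT	4

/* Stand-in for mc_alloc_slots[]: one reservation count per allocator. */
static uint64_t alloc_slots[ALLOC_COUNT];

static void
slots_reserve(int allocator, uint64_t n)
{
	alloc_slots[allocator] += n;
}

static void
slots_release(int allocator, uint64_t n)
{
	/*
	 * Releasing against a different allocator than the one the zio
	 * reserved from would underflow here and trip the check.
	 */
	assert(alloc_slots[allocator] >= n);
	alloc_slots[allocator] -= n;
}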