Deleted Added
full compact
metaslab.c (208370) metaslab.c (209962)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 5 unchanged lines hidden (view full) ---

14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 5 unchanged lines hidden (view full) ---

14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#include <sys/zfs_context.h>
27#include <sys/spa_impl.h>
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
34
35uint64_t metaslab_aliquot = 512ULL << 10;
36uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
37
38/*
23 * Use is subject to license terms.
24 */
25
26#include <sys/zfs_context.h>
27#include <sys/spa_impl.h>
28#include <sys/dmu.h>
29#include <sys/dmu_tx.h>
30#include <sys/space_map.h>
31#include <sys/metaslab_impl.h>
32#include <sys/vdev_impl.h>
33#include <sys/zio.h>
34
35uint64_t metaslab_aliquot = 512ULL << 10;
36uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
37
38/*
39 * Minimum size which forces the dynamic allocator to change
 40 * its allocation strategy. Once the space map cannot satisfy
41 * an allocation of this size then it switches to using more
 42 * aggressive strategy (i.e., search by size rather than offset).
43 */
44uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
45
46/*
47 * The minimum free space, in percent, which must be available
48 * in a space map to continue allocations in a first-fit fashion.
49 * Once the space_map's free space drops below this level we dynamically
50 * switch to using best-fit allocations.
51 */
52int metaslab_df_free_pct = 30;
53
54/*
39 * ==========================================================================
40 * Metaslab classes
41 * ==========================================================================
42 */
43metaslab_class_t *
55 * ==========================================================================
56 * Metaslab classes
57 * ==========================================================================
58 */
59metaslab_class_t *
44metaslab_class_create(void)
60metaslab_class_create(space_map_ops_t *ops)
45{
46 metaslab_class_t *mc;
47
48 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
49
50 mc->mc_rotor = NULL;
61{
62 metaslab_class_t *mc;
63
64 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
65
66 mc->mc_rotor = NULL;
67 mc->mc_ops = ops;
51
52 return (mc);
53}
54
55void
56metaslab_class_destroy(metaslab_class_t *mc)
57{
58 metaslab_group_t *mg;

--- 138 unchanged lines hidden (view full) ---

197 ASSERT(msp->ms_group == mg);
198 avl_remove(&mg->mg_metaslab_tree, msp);
199 msp->ms_weight = weight;
200 avl_add(&mg->mg_metaslab_tree, msp);
201 mutex_exit(&mg->mg_lock);
202}
203
204/*
68
69 return (mc);
70}
71
72void
73metaslab_class_destroy(metaslab_class_t *mc)
74{
75 metaslab_group_t *mg;

--- 138 unchanged lines hidden (view full) ---

214 ASSERT(msp->ms_group == mg);
215 avl_remove(&mg->mg_metaslab_tree, msp);
216 msp->ms_weight = weight;
217 avl_add(&mg->mg_metaslab_tree, msp);
218 mutex_exit(&mg->mg_lock);
219}
220
221/*
205 * ==========================================================================
206 * The first-fit block allocator
207 * ==========================================================================
222 * This is a helper function that can be used by the allocator to find
223 * a suitable block to allocate. This will search the specified AVL
224 * tree looking for a block that matches the specified criteria.
208 */
225 */
209static void
210metaslab_ff_load(space_map_t *sm)
211{
212 ASSERT(sm->sm_ppd == NULL);
213 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
214}
215
216static void
217metaslab_ff_unload(space_map_t *sm)
218{
219 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
220 sm->sm_ppd = NULL;
221}
222
223static uint64_t
226static uint64_t
224metaslab_ff_alloc(space_map_t *sm, uint64_t size)
227metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
228 uint64_t align)
225{
229{
226 avl_tree_t *t = &sm->sm_root;
227 uint64_t align = size & -size;
228 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
229 space_seg_t *ss, ssearch;
230 avl_index_t where;
231
232 ssearch.ss_start = *cursor;
233 ssearch.ss_end = *cursor + size;
234
235 ss = avl_find(t, &ssearch, &where);
236 if (ss == NULL)

--- 12 unchanged lines hidden (view full) ---

249 /*
250 * If we know we've searched the whole map (*cursor == 0), give up.
251 * Otherwise, reset the cursor to the beginning and try again.
252 */
253 if (*cursor == 0)
254 return (-1ULL);
255
256 *cursor = 0;
230 space_seg_t *ss, ssearch;
231 avl_index_t where;
232
233 ssearch.ss_start = *cursor;
234 ssearch.ss_end = *cursor + size;
235
236 ss = avl_find(t, &ssearch, &where);
237 if (ss == NULL)

--- 12 unchanged lines hidden (view full) ---

250 /*
251 * If we know we've searched the whole map (*cursor == 0), give up.
252 * Otherwise, reset the cursor to the beginning and try again.
253 */
254 if (*cursor == 0)
255 return (-1ULL);
256
257 *cursor = 0;
257 return (metaslab_ff_alloc(sm, size));
258 return (metaslab_block_picker(t, cursor, size, align));
258}
259
259}
260
261/*
262 * ==========================================================================
263 * The first-fit block allocator
264 * ==========================================================================
265 */
266static void
267metaslab_ff_load(space_map_t *sm)
268{
269 ASSERT(sm->sm_ppd == NULL);
270 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
271 sm->sm_pp_root = NULL;
272}
273
274static void
275metaslab_ff_unload(space_map_t *sm)
276{
277 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
278 sm->sm_ppd = NULL;
279}
280
281static uint64_t
282metaslab_ff_alloc(space_map_t *sm, uint64_t size)
283{
284 avl_tree_t *t = &sm->sm_root;
285 uint64_t align = size & -size;
286 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
287
288 return (metaslab_block_picker(t, cursor, size, align));
289}
290
260/* ARGSUSED */
261static void
262metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
263{
264 /* No need to update cursor */
265}
266
267/* ARGSUSED */
268static void
269metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
270{
271 /* No need to update cursor */
272}
273
274static space_map_ops_t metaslab_ff_ops = {
275 metaslab_ff_load,
276 metaslab_ff_unload,
277 metaslab_ff_alloc,
278 metaslab_ff_claim,
291/* ARGSUSED */
292static void
293metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
294{
295 /* No need to update cursor */
296}
297
298/* ARGSUSED */
299static void
300metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
301{
302 /* No need to update cursor */
303}
304
305static space_map_ops_t metaslab_ff_ops = {
306 metaslab_ff_load,
307 metaslab_ff_unload,
308 metaslab_ff_alloc,
309 metaslab_ff_claim,
279 metaslab_ff_free
310 metaslab_ff_free,
311 NULL /* maxsize */
280};
281
282/*
312};
313
314/*
315 * Dynamic block allocator -
 316 * Uses the first fit allocation scheme until space gets low and then
317 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
318 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
319 */
320
321uint64_t
322metaslab_df_maxsize(space_map_t *sm)
323{
324 avl_tree_t *t = sm->sm_pp_root;
325 space_seg_t *ss;
326
327 if (t == NULL || (ss = avl_last(t)) == NULL)
328 return (0ULL);
329
330 return (ss->ss_end - ss->ss_start);
331}
332
333static int
334metaslab_df_seg_compare(const void *x1, const void *x2)
335{
336 const space_seg_t *s1 = x1;
337 const space_seg_t *s2 = x2;
338 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
339 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
340
341 if (ss_size1 < ss_size2)
342 return (-1);
343 if (ss_size1 > ss_size2)
344 return (1);
345
346 if (s1->ss_start < s2->ss_start)
347 return (-1);
348 if (s1->ss_start > s2->ss_start)
349 return (1);
350
351 return (0);
352}
353
354static void
355metaslab_df_load(space_map_t *sm)
356{
357 space_seg_t *ss;
358
359 ASSERT(sm->sm_ppd == NULL);
360 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
361
362 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
363 avl_create(sm->sm_pp_root, metaslab_df_seg_compare,
364 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
365
366 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
367 avl_add(sm->sm_pp_root, ss);
368}
369
370static void
371metaslab_df_unload(space_map_t *sm)
372{
373 void *cookie = NULL;
374
375 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
376 sm->sm_ppd = NULL;
377
378 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
379 /* tear down the tree */
380 }
381
382 avl_destroy(sm->sm_pp_root);
383 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
384 sm->sm_pp_root = NULL;
385}
386
387static uint64_t
388metaslab_df_alloc(space_map_t *sm, uint64_t size)
389{
390 avl_tree_t *t = &sm->sm_root;
391 uint64_t align = size & -size;
392 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
393 uint64_t max_size = metaslab_df_maxsize(sm);
394 int free_pct = sm->sm_space * 100 / sm->sm_size;
395
396 ASSERT(MUTEX_HELD(sm->sm_lock));
397 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
398
399 if (max_size < size)
400 return (-1ULL);
401
402 /*
403 * If we're running low on space switch to using the size
404 * sorted AVL tree (best-fit).
405 */
406 if (max_size < metaslab_df_alloc_threshold ||
407 free_pct < metaslab_df_free_pct) {
408 t = sm->sm_pp_root;
409 *cursor = 0;
410 }
411
412 return (metaslab_block_picker(t, cursor, size, 1ULL));
413}
414
415/* ARGSUSED */
416static void
417metaslab_df_claim(space_map_t *sm, uint64_t start, uint64_t size)
418{
419 /* No need to update cursor */
420}
421
422/* ARGSUSED */
423static void
424metaslab_df_free(space_map_t *sm, uint64_t start, uint64_t size)
425{
426 /* No need to update cursor */
427}
428
429static space_map_ops_t metaslab_df_ops = {
430 metaslab_df_load,
431 metaslab_df_unload,
432 metaslab_df_alloc,
433 metaslab_df_claim,
434 metaslab_df_free,
435 metaslab_df_maxsize
436};
437
438space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
439
440/*
283 * ==========================================================================
284 * Metaslabs
285 * ==========================================================================
286 */
287metaslab_t *
288metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
289 uint64_t start, uint64_t size, uint64_t txg)
290{

--- 118 unchanged lines hidden (view full) ---

409 * make it preferable to any inactive metaslab so we'll polish it off.
410 */
411 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
412
413 return (weight);
414}
415
416static int
441 * ==========================================================================
442 * Metaslabs
443 * ==========================================================================
444 */
445metaslab_t *
446metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
447 uint64_t start, uint64_t size, uint64_t txg)
448{

--- 118 unchanged lines hidden (view full) ---

567 * make it preferable to any inactive metaslab so we'll polish it off.
568 */
569 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
570
571 return (weight);
572}
573
574static int
417metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
575metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
418{
419 space_map_t *sm = &msp->ms_map;
576{
577 space_map_t *sm = &msp->ms_map;
578 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
420
421 ASSERT(MUTEX_HELD(&msp->ms_lock));
422
423 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
579
580 ASSERT(MUTEX_HELD(&msp->ms_lock));
581
582 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
424 int error = space_map_load(sm, &metaslab_ff_ops,
425 SM_FREE, &msp->ms_smo,
583 int error = space_map_load(sm, sm_ops, SM_FREE, &msp->ms_smo,
426 msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
427 if (error) {
428 metaslab_group_sort(msp->ms_group, msp, 0);
429 return (error);
430 }
584 msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
585 if (error) {
586 metaslab_group_sort(msp->ms_group, msp, 0);
587 return (error);
588 }
589
590 /*
591 * If we were able to load the map then make sure
592 * that this map is still able to satisfy our request.
593 */
594 if (msp->ms_weight < size)
595 return (ENOSPC);
596
431 metaslab_group_sort(msp->ms_group, msp,
432 msp->ms_weight | activation_weight);
433 }
434 ASSERT(sm->sm_loaded);
435 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
436
437 return (0);
438}

--- 192 unchanged lines hidden (view full) ---

631 metaslab_t *msp = NULL;
632 uint64_t offset = -1ULL;
633 avl_tree_t *t = &mg->mg_metaslab_tree;
634 uint64_t activation_weight;
635 uint64_t target_distance;
636 int i;
637
638 activation_weight = METASLAB_WEIGHT_PRIMARY;
597 metaslab_group_sort(msp->ms_group, msp,
598 msp->ms_weight | activation_weight);
599 }
600 ASSERT(sm->sm_loaded);
601 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
602
603 return (0);
604}

--- 192 unchanged lines hidden (view full) ---

797 metaslab_t *msp = NULL;
798 uint64_t offset = -1ULL;
799 avl_tree_t *t = &mg->mg_metaslab_tree;
800 uint64_t activation_weight;
801 uint64_t target_distance;
802 int i;
803
804 activation_weight = METASLAB_WEIGHT_PRIMARY;
639 for (i = 0; i < d; i++)
640 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
805 for (i = 0; i < d; i++) {
806 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
641 activation_weight = METASLAB_WEIGHT_SECONDARY;
807 activation_weight = METASLAB_WEIGHT_SECONDARY;
808 break;
809 }
810 }
642
643 for (;;) {
811
812 for (;;) {
813 boolean_t was_active;
814
644 mutex_enter(&mg->mg_lock);
645 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
646 if (msp->ms_weight < size) {
647 mutex_exit(&mg->mg_lock);
648 return (-1ULL);
649 }
650
815 mutex_enter(&mg->mg_lock);
816 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
817 if (msp->ms_weight < size) {
818 mutex_exit(&mg->mg_lock);
819 return (-1ULL);
820 }
821
822 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
651 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
652 break;
653
654 target_distance = min_distance +
655 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
656
657 for (i = 0; i < d; i++)
658 if (metaslab_distance(msp, &dva[i]) <

--- 9 unchanged lines hidden (view full) ---

668 mutex_enter(&msp->ms_lock);
669
670 /*
671 * Ensure that the metaslab we have selected is still
672 * capable of handling our request. It's possible that
673 * another thread may have changed the weight while we
674 * were blocked on the metaslab lock.
675 */
823 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
824 break;
825
826 target_distance = min_distance +
827 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
828
829 for (i = 0; i < d; i++)
830 if (metaslab_distance(msp, &dva[i]) <

--- 9 unchanged lines hidden (view full) ---

840 mutex_enter(&msp->ms_lock);
841
842 /*
843 * Ensure that the metaslab we have selected is still
844 * capable of handling our request. It's possible that
845 * another thread may have changed the weight while we
846 * were blocked on the metaslab lock.
847 */
676 if (msp->ms_weight < size) {
848 if (msp->ms_weight < size || (was_active &&
849 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
850 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
677 mutex_exit(&msp->ms_lock);
678 continue;
679 }
680
681 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
682 activation_weight == METASLAB_WEIGHT_PRIMARY) {
683 metaslab_passivate(msp,
684 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
685 mutex_exit(&msp->ms_lock);
686 continue;
687 }
688
851 mutex_exit(&msp->ms_lock);
852 continue;
853 }
854
855 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
856 activation_weight == METASLAB_WEIGHT_PRIMARY) {
857 metaslab_passivate(msp,
858 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
859 mutex_exit(&msp->ms_lock);
860 continue;
861 }
862
689 if (metaslab_activate(msp, activation_weight) != 0) {
863 if (metaslab_activate(msp, activation_weight, size) != 0) {
690 mutex_exit(&msp->ms_lock);
691 continue;
692 }
693
694 if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
695 break;
696
697 metaslab_passivate(msp, size - 1);

--- 17 unchanged lines hidden (view full) ---

715static int
716metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
717 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
718{
719 metaslab_group_t *mg, *rotor;
720 vdev_t *vd;
721 int dshift = 3;
722 int all_zero;
864 mutex_exit(&msp->ms_lock);
865 continue;
866 }
867
868 if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
869 break;
870
871 metaslab_passivate(msp, size - 1);

--- 17 unchanged lines hidden (view full) ---

889static int
890metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
891 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
892{
893 metaslab_group_t *mg, *rotor;
894 vdev_t *vd;
895 int dshift = 3;
896 int all_zero;
897 int zio_lock = B_FALSE;
898 boolean_t allocatable;
723 uint64_t offset = -1ULL;
724 uint64_t asize;
725 uint64_t distance;
726
727 ASSERT(!DVA_IS_VALID(&dva[d]));
728
729 /*
730 * For testing, make some blocks above a certain size be gang blocks.

--- 42 unchanged lines hidden (view full) ---

773 if (mg->mg_class != mc)
774 mg = mc->mc_rotor;
775
776 rotor = mg;
777top:
778 all_zero = B_TRUE;
779 do {
780 vd = mg->mg_vd;
899 uint64_t offset = -1ULL;
900 uint64_t asize;
901 uint64_t distance;
902
903 ASSERT(!DVA_IS_VALID(&dva[d]));
904
905 /*
906 * For testing, make some blocks above a certain size be gang blocks.

--- 42 unchanged lines hidden (view full) ---

949 if (mg->mg_class != mc)
950 mg = mc->mc_rotor;
951
952 rotor = mg;
953top:
954 all_zero = B_TRUE;
955 do {
956 vd = mg->mg_vd;
957
781 /*
782 * Don't allocate from faulted devices.
783 */
958 /*
959 * Don't allocate from faulted devices.
960 */
784 if (!vdev_allocatable(vd))
961 if (zio_lock) {
962 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
963 allocatable = vdev_allocatable(vd);
964 spa_config_exit(spa, SCL_ZIO, FTAG);
965 } else {
966 allocatable = vdev_allocatable(vd);
967 }
968 if (!allocatable)
785 goto next;
969 goto next;
970
786 /*
787 * Avoid writing single-copy data to a failing vdev
788 */
789 if ((vd->vdev_stat.vs_write_errors > 0 ||
790 vd->vdev_state < VDEV_STATE_HEALTHY) &&
791 d == 0 && dshift == 3) {
792 all_zero = B_FALSE;
793 goto next;

--- 59 unchanged lines hidden (view full) ---

853 } while ((mg = mg->mg_next) != rotor);
854
855 if (!all_zero) {
856 dshift++;
857 ASSERT(dshift < 64);
858 goto top;
859 }
860
971 /*
972 * Avoid writing single-copy data to a failing vdev
973 */
974 if ((vd->vdev_stat.vs_write_errors > 0 ||
975 vd->vdev_state < VDEV_STATE_HEALTHY) &&
976 d == 0 && dshift == 3) {
977 all_zero = B_FALSE;
978 goto next;

--- 59 unchanged lines hidden (view full) ---

1038 } while ((mg = mg->mg_next) != rotor);
1039
1040 if (!all_zero) {
1041 dshift++;
1042 ASSERT(dshift < 64);
1043 goto top;
1044 }
1045
1046 if (!allocatable && !zio_lock) {
1047 dshift = 3;
1048 zio_lock = B_TRUE;
1049 goto top;
1050 }
1051
861 bzero(&dva[d], sizeof (dva_t));
862
863 return (ENOSPC);
864}
865
866/*
867 * Free the block represented by DVA in the context of the specified
868 * transaction group.

--- 64 unchanged lines hidden (view full) ---

933
934 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
935
936 if (DVA_GET_GANG(dva))
937 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
938
939 mutex_enter(&msp->ms_lock);
940
1052 bzero(&dva[d], sizeof (dva_t));
1053
1054 return (ENOSPC);
1055}
1056
1057/*
1058 * Free the block represented by DVA in the context of the specified
1059 * transaction group.

--- 64 unchanged lines hidden (view full) ---

1124
1125 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1126
1127 if (DVA_GET_GANG(dva))
1128 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1129
1130 mutex_enter(&msp->ms_lock);
1131
941 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
1132 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);
942 if (error || txg == 0) { /* txg == 0 indicates dry run */
943 mutex_exit(&msp->ms_lock);
944 return (error);
945 }
946
947 space_map_claim(&msp->ms_map, offset, size);
948
1133 if (error || txg == 0) { /* txg == 0 indicates dry run */
1134 mutex_exit(&msp->ms_lock);
1135 return (error);
1136 }
1137
1138 space_map_claim(&msp->ms_map, offset, size);
1139
949 if (spa_mode & FWRITE) { /* don't dirty if we're zdb(1M) */
1140 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
950 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
951 vdev_dirty(vd, VDD_METASLAB, msp, txg);
952 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
953 }
954
955 mutex_exit(&msp->ms_lock);
956
957 return (0);

--- 92 unchanged lines hidden ---
1141 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
1142 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1143 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
1144 }
1145
1146 mutex_exit(&msp->ms_lock);
1147
1148 return (0);

--- 92 unchanged lines hidden ---