/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>

/*
 * Maximum number of metaslabs per group that can be initialized
 * simultaneously.
 */
int max_initialize_ms = 3;

/*
 * Value that is written to disk during initialization.
 */
uint64_t zfs_initialize_value = 0xdeadbeefdeadbeefULL;

/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;

static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}
	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}

static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);
}

static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}
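
/*
 * Editorial note on the flow above: vdev_initialize_write() holds
 * SCL_STATE_ALL as reader across the asynchronous zio, and the lock is
 * released by vdev_initialize_cb() when the write completes. Because the
 * zio is parented to spa_txg_zio[txg], spa_sync() waits for it before
 * running the sync tasks of that txg, which is what makes the offset
 * rollback in vdev_initialize_cb() safe.
 */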

/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
	/*
	 * Walk up the vdev tree
	 */
	if (vd != vd->vdev_top) {
		vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
	} else {
		/*
		 * We've reached the top-level vdev, initialize the
		 * physical range to the logical range and start to
		 * unwind.
		 */
		physical_rs->rs_start = logical_rs->rs_start;
		physical_rs->rs_end = logical_rs->rs_end;
		return;
	}

	vdev_t *pvd = vd->vdev_parent;
	ASSERT3P(pvd, !=, NULL);
	ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

	/*
	 * As this recursive function unwinds, translate the logical
	 * range into its physical components by calling the
	 * vdev specific translate function.
	 */
	range_seg_t intermediate = { 0 };
	pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

	physical_rs->rs_start = intermediate.rs_start;
	physical_rs->rs_end = intermediate.rs_end;
}
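
/*
 * Worked example (editorial, illustrative only): for a leaf that is the
 * direct child of a top-level mirror, the unwinding step is an identity
 * mapping, so a logical range of [0, 1M) remains [0, 1M) physical on the
 * leaf; this matches the IMPLY() checks in vdev_initialize_range_add()
 * for the vd == vdev_top case. For a child of an N-way raidz, data is
 * striped across the children, so the physical range on one leaf is
 * roughly 1/N of the logical range -- the same 1/N scaling that
 * vdev_initialize_calculate_progress() applies to ms_free below.
 */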

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
/* ARGSUSED */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	ASSERT0(len % sizeof (uint64_t));
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
	return (0);
}

static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
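
/*
 * Editorial sketch, not part of the original file: a hypothetical helper
 * like the one below could verify that a filled buffer carries the
 * expected 0xdeadbeefdeadbeef pattern, e.g. from a unit test.
 */
#if 0
static boolean_t
vdev_initialize_pattern_matches(const void *buf, size_t len)
{
	const uint64_t *p = buf;

	ASSERT0(len % sizeof (uint64_t));
	for (size_t i = 0; i < len / sizeof (uint64_t); i++) {
		if (p[i] != zfs_initialize_value)
			return (B_FALSE);
	}
	return (B_TRUE);
}
#endif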

static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

	for (range_seg_t *rs = avl_first(rt); rs != NULL;
	    rs = AVL_NEXT(rt, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs->rs_start +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}
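
/*
 * Example of the chunking arithmetic above (editorial): with the default
 * 1 MiB zfs_initialize_chunk_size, a 2.5 MiB segment (size = 2621440)
 * yields writes_required = ((2621440 - 1) / 1048576) + 1 = 3, i.e. two
 * full 1 MiB writes followed by a final 0.5 MiB write produced by the
 * MIN() clamp on the last iteration.
 */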

static void
vdev_initialize_ms_load(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		VERIFY0(metaslab_load(msp));
}

static void
vdev_initialize_mg_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	while (mg->mg_initialize_updating) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
}

static void
vdev_initialize_mg_mark(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	ASSERT(mg->mg_initialize_updating);

	while (mg->mg_ms_initializing >= max_initialize_ms) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
	mg->mg_ms_initializing++;
	ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
}

/*
 * Mark the metaslab as being initialized to prevent any allocations
 * on this metaslab. We must also track how many metaslabs are currently
 * being initialized within a metaslab group and limit them to prevent
 * allocation failures from occurring because all metaslabs are being
 * initialized.
 */
static void
vdev_initialize_ms_mark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_initialize_lock);

	/*
	 * To keep an accurate count of how many threads are initializing
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_initializing will be accurate when we decide to mark a metaslab
	 * group as being initialized. To do this we force all other threads
	 * to wait till the metaslab's mg_initialize_updating flag is no
	 * longer set.
	 */
	vdev_initialize_mg_wait(mg);
	mg->mg_initialize_updating = B_TRUE;
	if (msp->ms_initializing == 0) {
		vdev_initialize_mg_mark(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_initializing++;
	mutex_exit(&msp->ms_lock);

	mg->mg_initialize_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_initialize_cv);
	mutex_exit(&mg->mg_ms_initialize_lock);
}

static void
vdev_initialize_ms_unmark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;
	mutex_enter(&mg->mg_ms_initialize_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_initializing == 0) {
		mg->mg_ms_initializing--;
		cv_broadcast(&mg->mg_ms_initialize_cv);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_initialize_lock);
}

static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = msp->ms_size -
		    space_map_allocated(msp->ms_sm);

		if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
			ms_free /= vd->vdev_top->vdev_children;

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg_t logical_rs, physical_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;
		vdev_xlate(vd, &logical_rs, &physical_rs);

		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (vd->vdev_initialize_last_offset >
		    physical_rs.rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		vdev_initialize_ms_load(msp);

		for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
		    rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
			logical_rs.rs_start = rs->rs_start;
			logical_rs.rs_end = rs->rs_end;
			vdev_xlate(vd, &logical_rs, &physical_rs);

			uint64_t size = physical_rs.rs_end -
			    physical_rs.rs_start;
			vd->vdev_initialize_bytes_est += size;
			if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done += size;
			} else if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_start &&
			    vd->vdev_initialize_last_offset <
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done +=
				    vd->vdev_initialize_last_offset -
				    physical_rs.rs_start;
			}
		}
		mutex_exit(&msp->ms_lock);
	}
}

static void
vdev_initialize_load(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		ASSERT(err == 0 || err == ENOENT);
	}

	vdev_initialize_calculate_progress(vd);
}

/*
 * Convert the logical range into a physical range and add it to our
 * avl tree.
 */
void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg_t logical_rs, physical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate(vd, &logical_rs, &physical_rs);

	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_start == physical_rs.rs_start);
	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_end == physical_rs.rs_end);

	/* Only add segments that we have not visited yet */
	if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs.rs_start,
		    (u_longlong_t)physical_rs.rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs.rs_end);
		ASSERT3U(physical_rs.rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs.rs_start = vd->vdev_initialize_last_offset;
	}
	ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

	/*
	 * With raidz, it's possible that the logical range does not live on
	 * this leaf vdev. We only add the physical range to this vdev's tree
	 * if it has a length greater than 0.
	 */
	if (physical_rs.rs_end > physical_rs.rs_start) {
		range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
		    physical_rs.rs_end - physical_rs.rs_start);
	} else {
		ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
	}
}
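
/*
 * Per-leaf worker thread (editorial summary): for each metaslab of the
 * top-level vdev it marks the metaslab to block allocations, loads it,
 * copies the allocatable ranges into vdev_initialize_tree, drops
 * SCL_CONFIG while the pattern writes are issued, and finally unmarks
 * the metaslab and vacates the tree before moving on.
 */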
static void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	vdev_initialize_load(vd);

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, NULL);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		vdev_initialize_ms_mark(msp);
		mutex_enter(&msp->ms_lock);
		vdev_initialize_ms_load(msp);

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		error = vdev_initialize_ranges(vd, deadbeef);
		vdev_initialize_ms_unmark(msp);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
		vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);
	thread_exit();
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
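
/*
 * Editorial caller sketch (hypothetical; the real entry point is the
 * ioctl path, which lives elsewhere). The asserts above define the
 * contract: hold vdev_initialize_lock and only start concrete leaves
 * that are not already initializing.
 */
#if 0
	mutex_enter(&vd->vdev_initialize_lock);
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd) &&
	    vd->vdev_initialize_thread == NULL)
		vdev_initialize(vd);
	mutex_exit(&vd->vdev_initialize_lock);
#endif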

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. Blocks until the initializing thread has exited.
 * Caller must hold vdev_initialize_lock and must not be writing to the spa
 * config, as the initializing thread may try to enter the config as a reader
 * before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	ASSERT(!spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_WRITER));

	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;
	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state);
	}
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	vdev_initialize_stop_all_impl(vd, tgt_state);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}
}

void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = (time_t)timestamp;

		if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) {
			/* load progress for reporting, but don't resume */
			vdev_initialize_load(vd);
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd)) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}
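
/*
 * Editorial note: the asserts in vdev_initialize_restart() show that
 * callers hold spa_namespace_lock without writing the spa config, which
 * fits pool import/online paths that need to resume an ACTIVE
 * initialization or reload progress for a SUSPENDED one.
 */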