/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *);
static boolean_t dsl_scan_restarting(dsl_scan_t *, dmu_tx_t *);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
    per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */

SYSCTL_DECL(_vfs_zfs);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RWTUN,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RWTUN,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RWTUN,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RWTUN,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RWTUN,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RWTUN,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RWTUN,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RWTUN,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");
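
/*
 * Illustrative note: with CTLFLAG_RWTUN, each of the knobs above is both a
 * run-time sysctl and a boot-time loader tunable on FreeBSD, named after
 * its declaration, e.g.:
 *
 *	sysctl vfs.zfs.scrub_delay=4
 *	sysctl vfs.zfs.no_scrub_prefetch=1
 */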

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN,
    &zfs_free_max_blocks, 0, "Maximum number of blocks to free in one TXG");


#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/*
 * Enable/disable the processing of the free_bpobj object.
 */
boolean_t zfs_free_bpobj_enabled = B_TRUE;

SYSCTL_INT(_vfs_zfs, OID_AUTO, free_bpobj_enabled, CTLFLAG_RWTUN,
    &zfs_free_bpobj_enabled, 0, "Enable free_bpobj processing");

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}
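
/*
 * Summary note: the persistent scan state (scn_phys) read above lives in
 * the MOS directory object under DMU_POOL_SCAN and is rewritten by
 * dsl_scan_sync_state() below; this is what lets an in-progress scrub
 * resume across a reboot or an export/import cycle.
 */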

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
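
/*
 * Note: dsl_scan_setup_check()/dsl_scan_setup_sync() are not called
 * directly; they are run as a sync task from dsl_scan() (see the bottom
 * of this file), so the whole setup executes atomically in syncing
 * context.
 */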

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx, zbookmark_phys_t *);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

extern int zfs_vdev_async_write_active_min_dirty_percent;

static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We pause if:
	 *  - we have scanned for the maximum time: an entire txg
	 *    timeout (default 5 sec)
	 *  or
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
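
/*
 * Worked example, assuming the default tunables above: a scrub pass in a
 * given txg pauses once elapsed_nanosecs / NANOSEC >= zfs_txg_timeout
 * (~5 seconds), or after only zfs_scan_min_time_ms (~1 second; 3 seconds
 * for resilver) when txg_sync_waiting() is true or dirty data has reached
 * zfs_vdev_async_write_active_min_dirty_percent (30% by default) of
 * zfs_dirty_data_max.
 */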

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid, zbookmark_phys_t *czb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}
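
/*
 * Pause/resume protocol, in brief: when dsl_scan_check_pause() fires, the
 * current zbookmark is saved in scn_phys.scn_bookmark and synced to disk.
 * On the next pass, dsl_scan_check_resume() skips every subtree already
 * covered by that bookmark, and clears it once the traversal reaches the
 * saved position (or a later object), re-arming the pause check.
 */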

/*
 * Return nonzero on i/o error.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		czb = kmem_alloc(sizeof (*czb), KM_SLEEP);
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i, czb);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, czb, dnp,
			    ds, scn, ostype, tx);
		}
		kmem_free(czb, sizeof (*czb));
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		czb = kmem_alloc(sizeof (*czb), KM_SLEEP);
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j,
				    czb);
			}
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx, czb);
		}
		kmem_free(czb, sizeof (*czb));

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		czb = kmem_alloc(sizeof (*czb), KM_SLEEP);
		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx, czb);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx, czb);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx, czb);
		}
		kmem_free(czb, sizeof (*czb));
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}
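
/*
 * Sizing note (derived from the shifts used above): a blkptr_t is
 * 1 << SPA_BLKPTRSHIFT = 128 bytes and a dnode_phys_t is
 * 1 << DNODE_SHIFT = 512 bytes, so e.g. a 16K indirect block yields
 * epb = 16384 >> 7 = 128 child block pointers to recurse into, and a
 * 16K dnode block yields epb = 16384 >> 9 = 32 dnodes to visit.
 */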

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx, zbookmark_phys_t *czb)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread = NULL;

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}

out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn->scn_phys.scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}
zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 975 "reset zb_objset to %llu", 976 (u_longlong_t)ds2->ds_object, 977 (u_longlong_t)ds1->ds_object); 978 } 979 980 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 981 ds1->ds_object, &mintxg) == 0) { 982 int err; 983 984 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 985 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 986 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 987 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 988 err = zap_add_int_key(dp->dp_meta_objset, 989 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx); 990 VERIFY(err == 0 || err == EEXIST); 991 if (err == EEXIST) { 992 /* Both were there to begin with */ 993 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 994 scn->scn_phys.scn_queue_obj, 995 ds1->ds_object, mintxg, tx)); 996 } 997 zfs_dbgmsg("clone_swap ds %llu; in queue; " 998 "replacing with %llu", 999 (u_longlong_t)ds1->ds_object, 1000 (u_longlong_t)ds2->ds_object); 1001 } else if (zap_lookup_int_key(dp->dp_meta_objset, 1002 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) { 1003 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 1004 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 1005 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 1006 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 1007 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 1008 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx)); 1009 zfs_dbgmsg("clone_swap ds %llu; in queue; " 1010 "replacing with %llu", 1011 (u_longlong_t)ds2->ds_object, 1012 (u_longlong_t)ds1->ds_object); 1013 } 1014 1015 dsl_scan_sync_state(scn, tx); 1016} 1017 1018struct enqueue_clones_arg { 1019 dmu_tx_t *tx; 1020 uint64_t originobj; 1021}; 1022 1023/* ARGSUSED */ 1024static int 1025enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 1026{ 1027 struct enqueue_clones_arg *eca = arg; 1028 dsl_dataset_t *ds; 1029 int err; 1030 dsl_scan_t *scn = dp->dp_scan; 1031 1032 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != eca->originobj) 1033 return (0); 1034 1035 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 1036 if (err) 1037 return (err); 1038 1039 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != eca->originobj) { 1040 dsl_dataset_t *prev; 1041 err = dsl_dataset_hold_obj(dp, 1042 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 1043 1044 dsl_dataset_rele(ds, FTAG); 1045 if (err) 1046 return (err); 1047 ds = prev; 1048 } 1049 VERIFY(zap_add_int_key(dp->dp_meta_objset, 1050 scn->scn_phys.scn_queue_obj, ds->ds_object, 1051 dsl_dataset_phys(ds)->ds_prev_snap_txg, eca->tx) == 0); 1052 dsl_dataset_rele(ds, FTAG); 1053 return (0); 1054} 1055 1056static void 1057dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 1058{ 1059 dsl_pool_t *dp = scn->scn_dp; 1060 dsl_dataset_t *ds; 1061 objset_t *os; 1062 1063 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 1064 1065 if (scn->scn_phys.scn_cur_min_txg >= 1066 scn->scn_phys.scn_max_txg) { 1067 /* 1068 * This can happen if this snapshot was created after the 1069 * scan started, and we already completed a previous snapshot 1070 * that was created after the scan started. This snapshot 1071 * only references blocks with: 1072 * 1073 * birth < our ds_creation_txg 1074 * cur_min_txg is no less than ds_creation_txg. 1075 * We have already visited these blocks. 1076 * or 1077 * birth > scn_max_txg 1078 * The scan requested not to visit these blocks. 

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started.  This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *		cur_min_txg is no less than ds_creation_txg.
		 *		We have already visited these blocks.
		 *	or
		 *	birth > scn_max_txg
		 *		The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots.  This happens when min_txg > 0,
		 * which raises cur_min_txg.  In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg.  Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    dsobj, dsname,
		    scn->scn_phys.scn_cur_min_txg,
		    scn->scn_phys.scn_max_txg);
		kmem_free(dsname, MAXNAMELEN);

		goto out;
	}

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !ds->ds_is_snapshot)
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendent datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx) == 0);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e., DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
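
/*
 * Worked example of the above: a block with refcnt == 3 sits in
 * DDT_CLASS_DUPLICATE, so dsl_scan_ddt() below visits it exactly once.
 * Later, whenever the top-down traversal reaches one of its three
 * referencing bps, the ddt_class_contains() check in dsl_scan_visitbp()
 * sees that the DDT phase already covered it and skips the scrub
 * callback.
 */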
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (dsl_scan_restarting(scn, tx)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(dp->dp_spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we pause, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (zfs_free_bpobj_enabled &&
	    spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%d",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */
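
/*
 * Bucketing note for count_block() below: the four iterations of its
 * loop file every block under four (level, type) buckets:
 *
 *	i == 0:	zab_type[level][DMU_OT_TOTAL]		all types, this level
 *	i == 1:	zab_type[level][type]			this type, this level
 *	i == 2:	zab_type[DN_MAX_LEVELS][DMU_OT_TOTAL]	grand total
 *	i == 3:	zab_type[DN_MAX_LEVELS][type]		this type, all levels
 */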

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}
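
/*
 * Throttling, by the numbers (using the default tunables at the top of
 * this file): a pool with 10 top-level vdevs allows at most
 * 10 * zfs_top_maxinflight = 320 scrub reads in flight, and if any
 * "important" I/O was issued within the last zfs_scan_idle (50) ticks,
 * each scrub read is preceded by a delay of zfs_scrub_delay (4) or
 * zfs_resilver_delay (2) ticks.
 */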

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}