dsl_scan.c revision 240868
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scan_cb_t dsl_scan_defrag_cb;
static scan_cb_t dsl_scan_scrub_cb;
static scan_cb_t dsl_scan_remove_cb;
static dsl_syncfunc_t dsl_scan_cancel_sync;
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
						 per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetching */

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

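/*
 * Check/sync pair for the sync task that starts a new scan; the check
 * refuses to start one while another scan is already in progress.
 */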
/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg1;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (EBUSY);

	return (0);
}

/* ARGSUSED */
static void
dsl_scan_setup_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg1;
	pool_scan_func_t *funcp = arg2;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(LOG_POOL_SCAN, spa, tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

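/*
 * Tear down a scan that has completed (complete == B_TRUE) or been
 * canceled: free the work queue, update the DTLs and on-disk state,
 * and post the appropriate finish event.
 */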
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(LOG_POOL_SCAN_DONE, spa, tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg1;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (ENOENT);
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg1;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	boolean_t complete = B_FALSE;
	int err;

	err = dsl_sync_task_do(dp, dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, dp->dp_scan, &complete, 3);
	return (err);
}

static void dsl_scan_visitbp(blkptr_t *bp,
    const zbookmark_t *zb, dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, arc_buf_t *buf, uint64_t object, dmu_tx_t *tx);

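/*
 * Thin wrappers: dsl_free() and dsl_free_sync() hand blocks to
 * zio_free()/zio_free_sync(), and dsl_read()/dsl_read_nolock() hand
 * reads to the corresponding arc_read() variants.
 */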
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

int
dsl_read(zio_t *pio, spa_t *spa, const blkptr_t *bpp, arc_buf_t *pbuf,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	return (arc_read(pio, spa, bpp, pbuf, done, private,
	    priority, zio_flags, arc_flags, zb));
}

int
dsl_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bpp,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	return (arc_read_nolock(pio, spa, bpp, done, private,
	    priority, zio_flags, arc_flags, zb));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (dsl_dataset_is_snapshot(ds))
		return (MIN(smt, ds->ds_phys->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY(0 == zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_t *zb)
{
	uint64_t elapsed_nanosecs;
	unsigned int mintime;

	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (elapsed_nanosecs / MICROSEC > mintime &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

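/*
 * Callbacks for zil_parse(): visit each claimed ZIL block, and the
 * block pointer embedded in each TX_WRITE record, that was born within
 * the range this scan cares about.
 */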
/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_t zb;

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_t czb;
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	/*
	 * XXX need to make sure all of these arc_read() prefetches are
	 * done before setting xlateall (similar to dsl_read())
	 */
	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    buf, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

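/*
 * Decide whether zb can be skipped because it lies before the bookmark
 * saved when a previous sync paused; once the resume point is reached,
 * the saved bookmark is zeroed so pause checks apply again.
 */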
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_t *zb, dmu_tx_t *tx, arc_buf_t **bufp)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, *bufp, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    *bufp, ds, scn, ostype, tx);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_USERGROUP_USED) {
		uint32_t flags = ARC_WAIT;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, *bufp, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, *bufp, zb->zb_blkid * epb + i, tx);
		}

	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, bufp,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = (*bufp)->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, *bufp, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(*bufp)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode, *bufp,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode, *bufp,
			    DMU_USERUSED_OBJECT, tx);
		}
	}

	return (0);
}

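/*
 * Visit each of the dnode's block pointers, plus its spill block if it
 * has one.
 */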
static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, buf, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_t *zb,
    dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (bp->blk_birth == 0)
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx buf=%p bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    pbuf, bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx,
	    &buf) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (bp->blk_birth <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

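/*
 * The next three hooks keep the scan state consistent when datasets
 * are destroyed, snapshotted, or clone-swapped mid-scan: both the
 * in-progress bookmark and the work queue are redirected to the
 * dataset(s) that inherit the blocks being visited.
 */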
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (dsl_dataset_is_snapshot(ds)) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    ds->ds_phys->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (dsl_dataset_is_snapshot(ds)) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

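/*
 * A new snapshot takes over its head dataset's place in the bookmark
 * and the work queue (ds_prev_snap_obj now names the just-created
 * snapshot, which owns the blocks being traversed).
 */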
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

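/*
 * For each clone of originobj, enqueue_clones_cb() walks back to the
 * clone's earliest snapshot after the origin and adds that snapshot to
 * the scan queue.
 */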
struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
		while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
			dsl_dataset_t *prev;
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
			ds = prev;
		}
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendent datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_phys->ds_next_snap_obj,
		    ds->ds_phys->ds_creation_txg, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (ds->ds_phys->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY(zap_join_key(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_creation_txg, tx) == 0);
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			(void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
			    NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

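/*
 * enqueue_cb() walks each filesystem back to its first snapshot and
 * adds that snapshot to the scan queue; clones are skipped here, since
 * they are reached through their origin.
 */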
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, ds->ds_phys->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
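/*
 * Walk the DDT from the saved scn_ddt_bookmark, visiting each entry in
 * the replication classes up to scn_ddt_class_max; pause criteria are
 * checked between entries.
 */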
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_cur_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

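/*
 * Top-level traversal for one sync's worth of scanning: finish the DDT
 * phase first, then visit the MOS, then work through the
 * zap-object-as-queue of datasets.
 */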
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
			    NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    ds->ds_phys->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (elapsed_nanosecs / MICROSEC > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (ERESTART);
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (B_TRUE);

	if (spa_feature_is_active(spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		return (B_TRUE);
	}
	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

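/*
 * Per-txg entry point, called from the pool's sync context: process
 * the free list (and the async-destroy bptree) first, then run the
 * scan until dsl_scan_check_pause() says time is up for this txg.
 */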
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(scn, &func, tx);
	}

	if (!dsl_scan_active(scn) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the free list.  If we pause the free, don't do
	 * any scanning.  This ensures that there is no free list when
	 * we are scanning, so the scan code doesn't have to worry about
	 * traversing it.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err == 0 && spa_feature_is_active(spa,
		    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
			scn->scn_is_bptree = B_TRUE;
			scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
			    NULL, ZIO_FLAG_MUSTSUCCEED);
			err = bptree_iterate(dp->dp_meta_objset,
			    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
			    scn, tx);
			VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));
			if (err != 0)
				return;

			/* disable async destroy feature */
			spa_feature_decr(spa,
			    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY], tx);
			ASSERT(!spa_feature_is_active(spa,
			    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]));
			VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY3U(0, ==, bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
		}
		if (scn->scn_visited_this_txg) {
			zfs_dbgmsg("freed %llu blocks in %llums from "
			    "free_bpobj/bptree txg %llu",
			    (longlong_t)scn->scn_visited_this_txg,
			    (longlong_t)
			    (gethrtime() - scn->scn_sync_start_time) / MICROSEC,
			    (longlong_t)tx->tx_txg);
			scn->scn_visited_this_txg = 0;
			/*
			 * Re-sync the ddt so that we can further modify
			 * it when doing bprewrite.
			 */
			ddt_sync(spa, tx->tx_txg);
		}
		if (err == ERESTART)
			return;
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_scan_visit(scn, tx);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)(gethrtime() - scn->scn_sync_start_time) / MICROSEC);

	if (!scn->scn_pausing) {
		/* finished with scan. */
		zfs_dbgmsg("finished scan txg %llu", (longlong_t)tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

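/*
 * Accumulate per-block statistics.  The four passes cross the block's
 * own level with its own type, and each with the "total" row/column
 * (DN_MAX_LEVELS and DMU_OT_TOTAL), so every block is counted once in
 * each of the four (level, type) buckets.
 */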
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int zio_priority;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		zio_priority = ZIO_PRIORITY_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
		zio_flags |= ZIO_FLAG_RESILVER;
		zio_priority = ZIO_PRIORITY_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, zio_priority,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

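/*
 * User-level entry point for starting a scrub or resilver: reopen the
 * vdevs to pick up state changes, then dispatch the setup sync task.
 */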
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task_do(dp, dsl_scan_setup_check,
	    dsl_scan_setup_sync, dp->dp_scan, &func, 0));
}