/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

unsigned int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver
						 per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
    &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN,
    &zfs_free_max_blocks, 0, "Maximum number of blocks to free in one TXG");


#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};
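
/*
 * A scan callback is dispatched by indexing this table with the active
 * scan function, e.g. from dsl_scan_visitbp() and dsl_scan_zil_block():
 *
 *	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
 *
 * Index 0 (POOL_SCAN_NONE) is NULL, so this may only be done while a
 * scan function is set.
 */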
"Min millisecs to scrub per txg"); 88TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms); 89SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW, 90 &zfs_free_min_time_ms, 0, "Min millisecs to free per txg"); 91TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms); 92SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW, 93 &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg"); 94TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io); 95SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW, 96 &zfs_no_scrub_io, 0, "Disable scrub I/O"); 97TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch); 98SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW, 99 &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching"); 100 101enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; 102/* max number of blocks to free in a single TXG */ 103uint64_t zfs_free_max_blocks = UINT64_MAX; 104SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN, 105 &zfs_free_max_blocks, 0, "Maximum number of blocks to free in one TXG"); 106 107 108#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \ 109 ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \ 110 (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER) 111 112extern int zfs_txg_timeout; 113 114/* the order has to match pool_scan_type */ 115static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { 116 NULL, 117 dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */ 118 dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ 119}; 120 121int 122dsl_scan_init(dsl_pool_t *dp, uint64_t txg) 123{ 124 int err; 125 dsl_scan_t *scn; 126 spa_t *spa = dp->dp_spa; 127 uint64_t f; 128 129 scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP); 130 scn->scn_dp = dp; 131 132 /* 133 * It's possible that we're resuming a scan after a reboot so 134 * make sure that the scan_async_destroying flag is initialized 135 * appropriately. 136 */ 137 ASSERT(!scn->scn_async_destroying); 138 scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa, 139 SPA_FEATURE_ASYNC_DESTROY); 140 141 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 142 "scrub_func", sizeof (uint64_t), 1, &f); 143 if (err == 0) { 144 /* 145 * There was an old-style scrub in progress. Restart a 146 * new-style scrub from the beginning. 147 */ 148 scn->scn_restart_txg = txg; 149 zfs_dbgmsg("old-style scrub was in progress; " 150 "restarting new-style scrub in txg %llu", 151 scn->scn_restart_txg); 152 153 /* 154 * Load the queue obj from the old location so that it 155 * can be freed by dsl_scan_done(). 156 */ 157 (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 158 "scrub_queue", sizeof (uint64_t), 1, 159 &scn->scn_phys.scn_queue_obj); 160 } else { 161 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 162 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 163 &scn->scn_phys); 164 if (err == ENOENT) 165 return (0); 166 else if (err) 167 return (err); 168 169 if (scn->scn_phys.scn_state == DSS_SCANNING && 170 spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) { 171 /* 172 * A new-type scrub was in progress on an old 173 * pool, and the pool was accessed by old 174 * software. Restart from the beginning, since 175 * the old software may have changed the pool in 176 * the meantime. 
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (dsl_dataset_is_snapshot(ds))
		return (MIN(smt, ds->ds_phys->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}
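
/*
 * The in-core scan state (scn->scn_phys) is persistent:
 * dsl_scan_sync_state() writes it to the MOS directory object under
 * DMU_POOL_SCAN whenever it changes, and dsl_scan_init() reads it back
 * at pool open, so an in-progress scan survives export, reboot, or
 * crash.
 */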

static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	uint64_t elapsed_nanosecs;
	unsigned int mintime;

	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}
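
/*
 * Block traversal: dsl_scan_visitbp() hands indirect, dnode, and objset
 * blocks to dsl_scan_recurse(), which reads each block and calls back
 * into dsl_scan_visitbp()/dsl_scan_visitdnode() for the children; leaf
 * blocks reach the scan callback through scan_funcs[].  All of a
 * block's children are prefetched before any of them is visited.
 */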

/*
 * Return nonzero on i/o error.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	}

	return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}
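
/*
 * The next three functions keep the persistent scan state consistent
 * when the dataset namespace changes in the middle of a scan:
 * dsl_scan_ds_destroyed(), dsl_scan_ds_snapshotted(), and
 * dsl_scan_ds_clone_swapped() redirect the in-progress bookmark and/or
 * the work-queue entry from the affected dataset to its successor, and
 * then sync the updated state out.
 */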
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (dsl_dataset_is_snapshot(ds)) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    ds->ds_phys->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (dsl_dataset_is_snapshot(ds)) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 936 "reset zb_objset to %llu", 937 (u_longlong_t)ds2->ds_object, 938 (u_longlong_t)ds1->ds_object); 939 } 940 941 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 942 ds1->ds_object, &mintxg) == 0) { 943 int err; 944 945 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg); 946 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg); 947 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 948 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 949 err = zap_add_int_key(dp->dp_meta_objset, 950 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx); 951 VERIFY(err == 0 || err == EEXIST); 952 if (err == EEXIST) { 953 /* Both were there to begin with */ 954 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 955 scn->scn_phys.scn_queue_obj, 956 ds1->ds_object, mintxg, tx)); 957 } 958 zfs_dbgmsg("clone_swap ds %llu; in queue; " 959 "replacing with %llu", 960 (u_longlong_t)ds1->ds_object, 961 (u_longlong_t)ds2->ds_object); 962 } else if (zap_lookup_int_key(dp->dp_meta_objset, 963 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) { 964 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg); 965 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg); 966 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 967 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 968 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 969 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx)); 970 zfs_dbgmsg("clone_swap ds %llu; in queue; " 971 "replacing with %llu", 972 (u_longlong_t)ds2->ds_object, 973 (u_longlong_t)ds1->ds_object); 974 } 975 976 dsl_scan_sync_state(scn, tx); 977} 978 979struct enqueue_clones_arg { 980 dmu_tx_t *tx; 981 uint64_t originobj; 982}; 983 984/* ARGSUSED */ 985static int 986enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 987{ 988 struct enqueue_clones_arg *eca = arg; 989 dsl_dataset_t *ds; 990 int err; 991 dsl_scan_t *scn = dp->dp_scan; 992 993 if (hds->ds_dir->dd_phys->dd_origin_obj != eca->originobj) 994 return (0); 995 996 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 997 if (err) 998 return (err); 999 1000 while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) { 1001 dsl_dataset_t *prev; 1002 err = dsl_dataset_hold_obj(dp, 1003 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev); 1004 1005 dsl_dataset_rele(ds, FTAG); 1006 if (err) 1007 return (err); 1008 ds = prev; 1009 } 1010 VERIFY(zap_add_int_key(dp->dp_meta_objset, 1011 scn->scn_phys.scn_queue_obj, ds->ds_object, 1012 ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0); 1013 dsl_dataset_rele(ds, FTAG); 1014 return (0); 1015} 1016 1017static void 1018dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 1019{ 1020 dsl_pool_t *dp = scn->scn_dp; 1021 dsl_dataset_t *ds; 1022 objset_t *os; 1023 1024 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 1025 1026 if (dmu_objset_from_ds(ds, &os)) 1027 goto out; 1028 1029 /* 1030 * Only the ZIL in the head (non-snapshot) is valid. Even though 1031 * snapshots can have ZIL block pointers (which may be the same 1032 * BP as in the head), they must be ignored. So we traverse the 1033 * ZIL here, rather than in scan_recurse(), because the regular 1034 * snapshot block-sharing rules don't apply to it. 1035 */ 1036 if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds)) 1037 dsl_scan_zil(dp, &os->os_zil_header); 1038 1039 /* 1040 * Iterate over the bps in this ds. 
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendent datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_phys->ds_next_snap_obj,
		    ds->ds_phys->ds_creation_txg, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (ds->ds_phys->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    ds->ds_phys->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, ds->ds_phys->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
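
/*
 * dsl_scan_ddt_entry() below is also the notification hook described in
 * the comment above: when a block's refcount rises above 1 while a scan
 * is in progress, ddt_sync_entry() is expected to call it so the block
 * is scrubbed right away.
 */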
/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    ds->ds_phys->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}
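
/*
 * Entry point for the per-txg scan work, called from the pool-sync
 * path.  The order of work is: handle a queued restart, process async
 * destroys (freeing is cheaper than scrubbing, and no scan runs while
 * destroys are pending), then run one time-sliced pass of the scan
 * proper via dsl_scan_visit().
 */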
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if ((!scn->scn_async_stalled && !dsl_scan_active(scn)) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys. If we pause, don't do
	 * any scrubbing or resilvering. This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it. It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async destroy as
			 * stalled, so that we will not initiate a spa_sync() on
			 * its behalf.
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%d",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed. This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
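	/*
	 * A nonzero err here is normally ERESTART from
	 * dsl_scan_free_should_pause(): the time slice for processing
	 * frees in this txg is used up, so defer both the remaining
	 * frees and any scrub/resilver work to the next txg.
	 */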
	if (err != 0)
		return;
	if (!scn->scn_async_destroying && zfs_free_leak_on_eio &&
	    (dp->dp_free_dir->dd_phys->dd_used_bytes != 0 ||
	    dp->dp_free_dir->dd_phys->dd_compressed_bytes != 0 ||
	    dp->dp_free_dir->dd_phys->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dp->dp_free_dir->dd_phys->dd_used_bytes,
		    dp->dp_free_dir->dd_phys->dd_compressed_bytes,
		    dp->dp_free_dir->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dp->dp_free_dir->dd_phys->dd_used_bytes,
		    -dp->dp_free_dir->dd_phys->dd_compressed_bytes,
		    -dp->dp_free_dir->dd_phys->dd_uncompressed_bytes, tx);
	}
	if (!scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dp->dp_free_dir->dd_phys->dd_used_bytes);
		ASSERT0(dp->dp_free_dir->dd_phys->dd_compressed_bytes);
		ASSERT0(dp->dp_free_dir->dd_phys->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}
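	/*
	 * Drain any scrub I/O still in flight before syncing out the
	 * updated scan state below, so that scn_errors accounts for
	 * every I/O issued on this txg's behalf.
	 */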
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	/*
	 * Each block is accumulated into four buckets: (level, type),
	 * (level, DMU_OT_TOTAL), (DN_MAX_LEVELS, type) and
	 * (DN_MAX_LEVELS, DMU_OT_TOTAL), so per-level, per-type, and
	 * overall totals are all maintained.
	 */
	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}
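/*
 * Per-block callback for scrubs and resilvers (see scan_funcs).  A scrub
 * reads every allocated DVA; a resilver only issues I/O when a DVA's
 * birth time falls within a gap in the top-level vdev's DTL (dirty time
 * log), i.e. when the data may be missing or stale on some child vdev.
 * Gang blocks are always read, since their members may span vdevs.
 */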
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	unsigned int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children *
		    MAX(zfs_top_maxinflight, 1);
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(MAX((int)scan_delay, 0));

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}