/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
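 * As a simplified illustration (offsets invented for this example): if
 * traversal discovers blocks at disk offsets 7G, 1G, 5G and 1G+128K,
 * issuing them in discovery order seeks back and forth across the device,
 * while the sorted queue issues 1G, 1G+128K, 5G, 7G, turning most of the
 * work into sequential reads. Likewise, when memory pressure forces early
 * issue, clearing out a few large extents costs the least sortedness per
 * byte of memory reclaimed.
 *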
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
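
/*
 * For example, with the default zfs_scan_checkpoint_intval of 7200 seconds,
 * an interrupted scan loses at most roughly two hours' worth of sorted-scan
 * progress: on the next import, the scan resumes from the last checkpoint
 * bookmark, whether or not the importing system supports queue sorting.
 */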

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_leaves(vdev_t *vd);

extern int zfs_vdev_async_write_active_min_dirty_percent;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently executing I/Os per top-level vdev.
 * Tune with care. Very high settings (hundreds) are known to trigger
 * some firmware bugs and resets on certain SSDs.
 */
int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver -- 2 is a good number */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub -- 4 is a good number */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

/*
 * Maximum number of concurrently in-flight bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
unsigned long zfs_scan_vdev_limit = 4 << 20;

int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
uint64_t zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

unsigned int zfs_scan_checkpoint_intval = 7200;	/* seconds */
#define	ZFS_SCAN_CHECKPOINT_INTVAL	SEC_TO_TICK(zfs_scan_checkpoint_intval)

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
uint64_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */

unsigned int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */
unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */

SYSCTL_DECL(_vfs_zfs);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN,
    &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RWTUN,
    &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RWTUN,
    &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RWTUN,
    &zfs_scan_idle, 0, "Idle scan window in clock ticks");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RWTUN,
    &zfs_scrub_min_time_ms, 0, "Min millisecs to scrub per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RWTUN,
    &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RWTUN,
    &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RWTUN,
    &zfs_no_scrub_io, 0, "Disable scrub I/O");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RWTUN,
    &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, zfs_scan_legacy, CTLFLAG_RWTUN,
    &zfs_scan_legacy, 0, "Scrub using legacy non-sequential method");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, zfs_scan_checkpoint_interval, CTLFLAG_RWTUN,
    &zfs_scan_checkpoint_intval, 0, "Scan progress on-disk checkpointing interval");

enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_async_block_max_blocks = UINT64_MAX;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, free_max_blocks, CTLFLAG_RWTUN,
    &zfs_async_block_max_blocks, 0, "Maximum number of blocks to free in one TXG");

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/*
 * Enable/disable the processing of the free_bpobj object.
 */
boolean_t zfs_free_bpobj_enabled = B_TRUE;

SYSCTL_INT(_vfs_zfs, OID_AUTO, free_bpobj_enabled, CTLFLAG_RWTUN,
    &zfs_free_bpobj_enabled, 0, "Enable free_bpobj processing");

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;
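
/*
 * For reference, the call sites below pair up with these types as follows:
 * dsl_scan_setup_sync() and dsl_scan_cancel_sync() use SYNC_MANDATORY,
 * since no sorted I/O can be queued at those points, while the pause/resume
 * and dataset-event paths (e.g. dsl_scan_ds_destroyed()) use SYNC_CACHED,
 * since queued I/O may still be outstanding there.
 */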

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_offset;
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_asize;

	/* fields from zio_t */
	int			sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;
} scan_io_t;
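
/*
 * For a rough sense of scale (illustrative figures, not exact sizes): a
 * scan_io_t is on the order of a hundred bytes, versus 128 bytes for a
 * full blkptr_t and considerably more for a zio_t, so millions of queued
 * I/Os fit within the memory limits described in dsl_scan_should_clear().
 */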

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	avl_tree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;	/* dsl_scan_t for the pool */
	boolean_t spc_root;	/* is this prefetch for an objset? */
	uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec; /* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;	/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache;

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	sio_cache = kmem_cache_create("sio_cache",
	    sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
scan_fini(void)
{
	kmem_cache_destroy(sio_cache);
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
	bzero(bp, sizeof (*bp));
	DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
	DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	/* we discard the vdev id, since we can deduce it from the queue */
	sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
	sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
}
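
/*
 * Note that bp2sio() followed by sio2bp() is lossy by design: only the
 * selected DVA survives, any additional copies are dropped, and blk_fill
 * is forced to 1 (the queues only ever hold level-0 data pointers). The
 * reconstructed blkptr_t is sufficient to issue and verify the read, but
 * it is not byte-identical to the original.
 */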

void
dsl_scan_global_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;
}

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB). Limits for the issuing
	 * phase are done per top-level vdev and are handled separately.
	 */
	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
	    dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20);

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    (longlong_t)scn->scn_restart_txg);
		}
	}

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);
		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint");

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn))
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, NULL,
			    ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (ECANCELED);
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
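
/*
 * Note on the return conventions above: 0 means a new scan sync task was
 * dispatched, while ECANCELED means a paused scrub was successfully
 * resumed rather than a new scan being started.
 */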

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE);

			spa_event_notify(spa, NULL, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	ASSERT(!dsl_scan_is_running(scn));
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* note when the pause began, for rate accounting below */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}
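
/*
 * For example (assuming the standard CLI mapping): "zpool scrub -p <pool>"
 * reaches this function with POOL_SCRUB_PAUSE, while re-running
 * "zpool scrub <pool>" on a paused scrub resumes it with POOL_SCRUB_NORMAL
 * via dsl_scan() above.
 */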

/* Start a new scan, or restart an existing one. */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, BP_GET_PSIZE(bpp),
	    pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_bytes_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
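
/*
 * Worked example (inputs invented for illustration): on a system with
 * 8 GiB of physmem and a pool with 10 TiB allocated, the hard limit is
 * MIN(MAX(8 GiB / 20, 16 MiB), 10 TiB / 20) = ~410 MiB, and the soft
 * limit is 410 MiB - MIN(410 MiB / 20, 128 MiB) = ~389 MiB.
 */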
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t mlim_hard, mlim_soft, mused;
	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
	    scn->scn_dp->dp_spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* #extents in exts_by_size = # in exts_by_addr */
			mused += avl_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_t) +
			    avl_numnodes(&queue->q_sios_by_addr) *
			    sizeof (scan_io_t);
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;

	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
			dsl_scan_phys_t *scnp = &scn->scn_phys;

			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		}
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
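
/*
 * For example, with the defaults above, a scrub relinquishes the txg after
 * one second of scanning (three seconds for a resilver) whenever dirty
 * data passes the write-throttle threshold, someone is waiting on the txg,
 * or the sync has already run past zfs_txg_timeout.
 */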

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") may have been allocated a long time ago; we
	 * want to visit that one because it has been allocated (on-disk)
	 * even if it hasn't been claimed (even though for scrub there's
	 * nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed
	 * but not yet replayed.
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}
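
/*
 * To summarize the filtering above: only logs that have been claimed but
 * not yet replayed are walked, and TX_WRITE records whose blocks were
 * already synced into the main pool (birth < claim_txg) are skipped, since
 * the regular dataset traversal will visit those blocks anyway.
 */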

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}

static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
	if (refcount_remove(&spc->spc_refcnt, tag) == 0) {
		refcount_destroy(&spc->spc_refcnt);
		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
	}
}

static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
	scan_prefetch_ctx_t *spc;

	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
	refcount_create(&spc->spc_refcnt);
	refcount_add(&spc->spc_refcnt, tag);
	spc->spc_scn = scn;
	if (dnp != NULL) {
		spc->spc_datablkszsec = dnp->dn_datablkszsec;
		spc->spc_indblkshift = dnp->dn_indblkshift;
		spc->spc_root = B_FALSE;
	} else {
		spc->spc_datablkszsec = 0;
		spc->spc_indblkshift = 0;
		spc->spc_root = B_TRUE;
	}

	return (spc);
}

static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
	refcount_add(&spc->spc_refcnt, tag);
}

static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
    const zbookmark_phys_t *zb)
{
	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
	dnode_phys_t tmp_dnp;
	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;

	if (zb->zb_objset != last_zb->zb_objset)
		return (B_TRUE);
	if ((int64_t)zb->zb_object < 0)
		return (B_FALSE);

	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;

	if (zbookmark_subtree_completed(dnp, zb, last_zb))
		return (B_TRUE);

	return (B_FALSE);
}
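
/*
 * Lifecycle note: a prefetch context holds one reference for the code that
 * created it plus one per queued scan_prefetch_issue_ctx_t pointing at it;
 * scan_prefetch_ctx_rele() frees the context when the last of those
 * references is dropped.
 */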

static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
	avl_index_t idx;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
		return;

	if (dsl_scan_check_prefetch_resume(spc, zb))
		return;

	scan_prefetch_ctx_add_ref(spc, scn);
	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
	spic->spic_spc = spc;
	spic->spic_bp = *bp;
	spic->spic_zb = *zb;

	/*
	 * Add the IO to the queue of blocks to prefetch. This allows us to
	 * prioritize blocks that we will need first for the main traversal
	 * thread.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
		/* this block is already queued for prefetch */
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
		scan_prefetch_ctx_rele(spc, scn);
		mutex_exit(&spa->spa_scrub_lock);
		return;
	}

	avl_insert(&scn->scn_prefetch_queue, spic, idx);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int i;
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return;

	SET_BOOKMARK(&zb, objset, object, 0, 0);

	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
		zb.zb_blkid = i;
		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zb.zb_level = 0;
		zb.zb_blkid = DMU_SPILL_BLKID;
		dsl_scan_prefetch(spc, &dnp->dn_spill, &zb);
	}

	scan_prefetch_ctx_rele(spc, FTAG);
}

void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *private)
{
	scan_prefetch_ctx_t *spc = private;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;

	/* broadcast that the IO has completed for rate limiting purposes */
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);

	/* if there was an error or we are done prefetching, just cleanup */
	if (buf == NULL || scn->scn_suspending)
		goto out;

	if (BP_GET_LEVEL(bp) > 0) {
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t czb;

		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			dsl_scan_prefetch(spc, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		dnode_phys_t *cdnp = buf->b_data;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_prefetch_dnode(scn, cdnp,
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		objset_phys_t *osp = buf->b_data;

		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
		    zb->zb_objset, DMU_META_DNODE_OBJECT);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_groupused_dnode, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_userused_dnode, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

out:
	if (buf != NULL)
		arc_buf_destroy(buf, private);
	scan_prefetch_ctx_rele(spc, scn);
}
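
/*
 * Putting the prefetch pieces together: dsl_scan_prefetch() queues
 * bookmarks in the order the main traversal will need them (per
 * scan_prefetch_queue_compare()), dsl_scan_prefetch_thread() below issues
 * them as prescient-prefetch ARC reads, and dsl_scan_prefetch_cb() walks
 * each completed buffer to queue the next level down.
 */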

/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
	dsl_scan_t *scn = arg;
	spa_t *spa = scn->scn_dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
	scan_prefetch_issue_ctx_t *spic;

	/* loop until we are told to stop */
	while (!scn->scn_prefetch_stop) {
		arc_flags_t flags = ARC_FLAG_NOWAIT |
		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;

		mutex_enter(&spa->spa_scrub_lock);

		/*
		 * Wait until we have an IO to issue and are not above our
		 * maximum in flight limit.
		 */
		while (!scn->scn_prefetch_stop &&
		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		}

		/* recheck if we should stop since we waited for the cv */
		if (scn->scn_prefetch_stop) {
			mutex_exit(&spa->spa_scrub_lock);
			break;
		}

		/* remove the prefetch IO from the tree */
		spic = avl_first(&scn->scn_prefetch_queue);
		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
		avl_remove(&scn->scn_prefetch_queue, spic);

		mutex_exit(&spa->spa_scrub_lock);

		/* issue the prefetch asynchronously */
		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);

		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}

	ASSERT(scn->scn_prefetch_stop);

	/* free any prefetches we didn't get to complete */
	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
		avl_remove(&scn->scn_prefetch_queue, spic);
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
	mutex_exit(&spa->spa_scrub_lock);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for suspending
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}
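
/*
 * For example (bookmark invented for illustration): if the saved bookmark
 * is <objset 21, object 500, level 0, blkid 7>, every block that sorts
 * before it in traversal order is skipped on the resuming pass, and
 * suspension checking is re-enabled once the traversal reaches or passes
 * the bookmark.
 */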

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, ds, scn, ostype, tx);
	}
}
1757 */ 1758static void 1759dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, 1760 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 1761 dmu_objset_type_t ostype, dmu_tx_t *tx) 1762{ 1763 dsl_pool_t *dp = scn->scn_dp; 1764 blkptr_t *bp_toread = NULL; 1765 1766 if (dsl_scan_check_suspend(scn, zb)) 1767 return; 1768 1769 if (dsl_scan_check_resume(scn, dnp, zb)) 1770 return; 1771 1772 scn->scn_visited_this_txg++; 1773 1774 dprintf_bp(bp, 1775 "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p", 1776 ds, ds ? ds->ds_object : 0, 1777 zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid, 1778 bp); 1779 1780 if (BP_IS_HOLE(bp)) { 1781 scn->scn_holes_this_txg++; 1782 return; 1783 } 1784 1785 if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) { 1786 scn->scn_lt_min_this_txg++; 1787 return; 1788 } 1789 1790 bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); 1791 *bp_toread = *bp; 1792 1793 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0) 1794 goto out; 1795 1796 /* 1797 * If dsl_scan_ddt() has already visited this block, it will have 1798 * already done any translations or scrubbing, so don't call the 1799 * callback again. 1800 */ 1801 if (ddt_class_contains(dp->dp_spa, 1802 scn->scn_phys.scn_ddt_class_max, bp)) { 1803 scn->scn_ddt_contained_this_txg++; 1804 goto out; 1805 } 1806 1807 /* 1808 * If this block is from the future (after cur_max_txg), then we 1809 * are doing this on behalf of a deleted snapshot, and we will 1810 * revisit the future block on the next pass of this dataset. 1811 * Don't scan it now unless we need to because something 1812 * under it was modified. 1813 */ 1814 if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { 1815 scn->scn_gt_max_this_txg++; 1816 goto out; 1817 } 1818 1819 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); 1820out: 1821 kmem_free(bp_toread, sizeof (blkptr_t)); 1822} 1823 1824static void 1825dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, 1826 dmu_tx_t *tx) 1827{ 1828 zbookmark_phys_t zb; 1829 scan_prefetch_ctx_t *spc; 1830 1831 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, 1832 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 1833 1834 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { 1835 SET_BOOKMARK(&scn->scn_prefetch_bookmark, 1836 zb.zb_objset, 0, 0, 0); 1837 } else { 1838 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; 1839 } 1840 1841 scn->scn_objsets_visited_this_txg++; 1842 1843 spc = scan_prefetch_ctx_create(scn, NULL, FTAG); 1844 dsl_scan_prefetch(spc, bp, &zb); 1845 scan_prefetch_ctx_rele(spc, FTAG); 1846 1847 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); 1848 1849 dprintf_ds(ds, "finished scan%s", ""); 1850} 1851 1852static void 1853ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) 1854{ 1855 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { 1856 if (ds->ds_is_snapshot) { 1857 /* 1858 * Note: 1859 * - scn_cur_{min,max}_txg stays the same. 1860 * - Setting the flag is not really necessary if 1861 * scn_cur_max_txg == scn_max_txg, because there 1862 * is nothing after this snapshot that we care 1863 * about. However, we set it anyway and then 1864 * ignore it when we retraverse it in 1865 * dsl_scan_visitds(). 
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t marking the objset reference in it
 *	as destroyed.
 * 2) We remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace with the new one.
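 *
 * A concrete sketch (object numbers assumed for illustration): if the
 * bookmark points into head dataset 75 when "zfs snapshot" runs, the
 * newly created snapshot (say 80) becomes the head's ds_prev_snap_obj
 * and now owns the blocks being traversed, so zb_objset is rewritten
 * from 75 to 80; likewise, a work-queue entry for 75 is replaced by
 * one for 80 carrying the same mintxg.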
1967 */ 1968void 1969dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) 1970{ 1971 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1972 dsl_scan_t *scn = dp->dp_scan; 1973 uint64_t mintxg; 1974 1975 if (!dsl_scan_is_running(scn)) 1976 return; 1977 1978 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); 1979 1980 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); 1981 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); 1982 1983 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 1984 scan_ds_queue_remove(scn, ds->ds_object); 1985 scan_ds_queue_insert(scn, 1986 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); 1987 } 1988 1989 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 1990 ds->ds_object, &mintxg) == 0) { 1991 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 1992 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 1993 VERIFY(zap_add_int_key(dp->dp_meta_objset, 1994 scn->scn_phys.scn_queue_obj, 1995 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); 1996 zfs_dbgmsg("snapshotting ds %llu; in queue; " 1997 "replacing with %llu", 1998 (u_longlong_t)ds->ds_object, 1999 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2000 } 2001 2002 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2003} 2004 2005static void 2006ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, 2007 zbookmark_phys_t *scn_bookmark) 2008{ 2009 if (scn_bookmark->zb_objset == ds1->ds_object) { 2010 scn_bookmark->zb_objset = ds2->ds_object; 2011 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 2012 "reset zb_objset to %llu", 2013 (u_longlong_t)ds1->ds_object, 2014 (u_longlong_t)ds2->ds_object); 2015 } else if (scn_bookmark->zb_objset == ds2->ds_object) { 2016 scn_bookmark->zb_objset = ds1->ds_object; 2017 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 2018 "reset zb_objset to %llu", 2019 (u_longlong_t)ds2->ds_object, 2020 (u_longlong_t)ds1->ds_object); 2021 } 2022} 2023 2024/* 2025 * Called when an origin dataset and its clone are swapped. If we were 2026 * currently traversing the dataset, we need to switch to traversing the 2027 * newly promoted clone. 2028 */ 2029void 2030dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) 2031{ 2032 dsl_pool_t *dp = ds1->ds_dir->dd_pool; 2033 dsl_scan_t *scn = dp->dp_scan; 2034 uint64_t mintxg1, mintxg2; 2035 boolean_t ds1_queued, ds2_queued; 2036 2037 if (!dsl_scan_is_running(scn)) 2038 return; 2039 2040 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); 2041 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); 2042 2043 /* 2044 * Handle the in-memory scan queue. 2045 */ 2046 ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1); 2047 ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2); 2048 2049 /* Sanity checking. */ 2050 if (ds1_queued) { 2051 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2052 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2053 } 2054 if (ds2_queued) { 2055 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2056 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2057 } 2058 2059 if (ds1_queued && ds2_queued) { 2060 /* 2061 * If both are queued, we don't need to do anything. 2062 * The swapping code below would not handle this case correctly, 2063 * since we can't insert ds2 if it is already there. That's 2064 * because scan_ds_queue_insert() prohibits a duplicate insert 2065 * and panics. 
2066 */ 2067 } else if (ds1_queued) { 2068 scan_ds_queue_remove(scn, ds1->ds_object); 2069 scan_ds_queue_insert(scn, ds2->ds_object, mintxg1); 2070 } else if (ds2_queued) { 2071 scan_ds_queue_remove(scn, ds2->ds_object); 2072 scan_ds_queue_insert(scn, ds1->ds_object, mintxg2); 2073 } 2074 2075 /* 2076 * Handle the on-disk scan queue. 2077 * The on-disk state is an out-of-date version of the in-memory state, 2078 * so the in-memory and on-disk values for ds1_queued and ds2_queued may 2079 * be different. Therefore we need to apply the swap logic to the 2080 * on-disk state independently of the in-memory state. 2081 */ 2082 ds1_queued = zap_lookup_int_key(dp->dp_meta_objset, 2083 scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0; 2084 ds2_queued = zap_lookup_int_key(dp->dp_meta_objset, 2085 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0; 2086 2087 /* Sanity checking. */ 2088 if (ds1_queued) { 2089 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2090 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2091 } 2092 if (ds2_queued) { 2093 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2094 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2095 } 2096 2097 if (ds1_queued && ds2_queued) { 2098 /* 2099 * If both are queued, we don't need to do anything. 2100 * Alternatively, we could check for EEXIST from 2101 * zap_add_int_key() and back out to the original state, but 2102 * that would be more work than checking for this case upfront. 2103 */ 2104 } else if (ds1_queued) { 2105 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2106 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 2107 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2108 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx)); 2109 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2110 "replacing with %llu", 2111 (u_longlong_t)ds1->ds_object, 2112 (u_longlong_t)ds2->ds_object); 2113 } else if (ds2_queued) { 2114 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2115 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 2116 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2117 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx)); 2118 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2119 "replacing with %llu", 2120 (u_longlong_t)ds2->ds_object, 2121 (u_longlong_t)ds1->ds_object); 2122 } 2123 2124 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2125} 2126 2127/* ARGSUSED */ 2128static int 2129enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2130{ 2131 uint64_t originobj = *(uint64_t *)arg; 2132 dsl_dataset_t *ds; 2133 int err; 2134 dsl_scan_t *scn = dp->dp_scan; 2135 2136 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) 2137 return (0); 2138 2139 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2140 if (err) 2141 return (err); 2142 2143 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { 2144 dsl_dataset_t *prev; 2145 err = dsl_dataset_hold_obj(dp, 2146 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2147 2148 dsl_dataset_rele(ds, FTAG); 2149 if (err) 2150 return (err); 2151 ds = prev; 2152 } 2153 scan_ds_queue_insert(scn, ds->ds_object, 2154 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2155 dsl_dataset_rele(ds, FTAG); 2156 return (0); 2157} 2158 2159static void 2160dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 2161{ 2162 dsl_pool_t *dp = scn->scn_dp; 2163 dsl_dataset_t *ds; 2164 2165 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2166 2167 if (scn->scn_phys.scn_cur_min_txg 
	    >= scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *		cur_min_txg is no less than ds_creation_txg.
		 *		We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *		The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, MAXNAMELEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in dsl_scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
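	 *
	 * For example (snapshot names assumed for illustration): after
	 * fully scanning snapshot S1, its ds_next_snap_obj (S2) is
	 * enqueued below with mintxg set to S1's ds_creation_txg, so the
	 * pass over S2 visits only blocks born after S1 was taken;
	 * blocks S2 shares with S1 were already covered.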
2264 */ 2265 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { 2266 scan_ds_queue_insert(scn, 2267 dsl_dataset_phys(ds)->ds_next_snap_obj, 2268 dsl_dataset_phys(ds)->ds_creation_txg); 2269 } 2270 if (dsl_dataset_phys(ds)->ds_num_children > 1) { 2271 boolean_t usenext = B_FALSE; 2272 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { 2273 uint64_t count; 2274 /* 2275 * A bug in a previous version of the code could 2276 * cause upgrade_clones_cb() to not set 2277 * ds_next_snap_obj when it should, leading to a 2278 * missing entry. Therefore we can only use the 2279 * next_clones_obj when its count is correct. 2280 */ 2281 int err = zap_count(dp->dp_meta_objset, 2282 dsl_dataset_phys(ds)->ds_next_clones_obj, &count); 2283 if (err == 0 && 2284 count == dsl_dataset_phys(ds)->ds_num_children - 1) 2285 usenext = B_TRUE; 2286 } 2287 2288 if (usenext) { 2289 zap_cursor_t zc; 2290 zap_attribute_t za; 2291 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2292 dsl_dataset_phys(ds)->ds_next_clones_obj); 2293 zap_cursor_retrieve(&zc, &za) == 0; 2294 (void) zap_cursor_advance(&zc)) { 2295 scan_ds_queue_insert(scn, 2296 zfs_strtonum(za.za_name, NULL), 2297 dsl_dataset_phys(ds)->ds_creation_txg); 2298 } 2299 zap_cursor_fini(&zc); 2300 } else { 2301 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2302 enqueue_clones_cb, &ds->ds_object, 2303 DS_FIND_CHILDREN)); 2304 } 2305 } 2306 2307out: 2308 dsl_dataset_rele(ds, FTAG); 2309} 2310 2311/* ARGSUSED */ 2312static int 2313enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2314{ 2315 dsl_dataset_t *ds; 2316 int err; 2317 dsl_scan_t *scn = dp->dp_scan; 2318 2319 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2320 if (err) 2321 return (err); 2322 2323 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { 2324 dsl_dataset_t *prev; 2325 err = dsl_dataset_hold_obj(dp, 2326 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2327 if (err) { 2328 dsl_dataset_rele(ds, FTAG); 2329 return (err); 2330 } 2331 2332 /* 2333 * If this is a clone, we don't need to worry about it for now. 2334 */ 2335 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { 2336 dsl_dataset_rele(ds, FTAG); 2337 dsl_dataset_rele(prev, FTAG); 2338 return (0); 2339 } 2340 dsl_dataset_rele(ds, FTAG); 2341 ds = prev; 2342 } 2343 2344 scan_ds_queue_insert(scn, ds->ds_object, 2345 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2346 dsl_dataset_rele(ds, FTAG); 2347 return (0); 2348} 2349 2350/* ARGSUSED */ 2351void 2352dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, 2353 ddt_entry_t *dde, dmu_tx_t *tx) 2354{ 2355 const ddt_key_t *ddk = &dde->dde_key; 2356 ddt_phys_t *ddp = dde->dde_phys; 2357 blkptr_t bp; 2358 zbookmark_phys_t zb = { 0 }; 2359 int p; 2360 2361 if (!dsl_scan_is_running(scn)) 2362 return; 2363 2364 /* 2365 * This function is special because it is the only thing 2366 * that can add scan_io_t's to the vdev scan queues from 2367 * outside dsl_scan_sync(). For the most part this is ok 2368 * as long as it is called from within syncing context. 2369 * However, dsl_scan_sync() expects that no new sio's will 2370 * be added between when all the work for a scan is done 2371 * and the next txg when the scan is actually marked as 2372 * completed. This check ensures we do not issue new sio's 2373 * during this period. 
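	 *
	 * Timeline sketch (txg numbers assumed for illustration): if all
	 * queued scan work drains during txg 100, scn_done_txg is set to
	 * 101 and the scan is only marked complete in txg 101; a DDT
	 * entry synced during txg 100 must not queue fresh sio's into
	 * that window, hence the scn_done_txg check below.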
 */
	if (scn->scn_done_txg != 0)
		return;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can, so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
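 *
 * In summary (refcnt as observed in each phase):
 *
 *	DDT phase	top-down phase	outcome
 *	---------	--------------	-------------------------------
 *	> 1		> 1		scrubbed once, in the DDT phase
 *	> 1		== 1		scrubbed twice (harmless)
 *	== 1		> 1		caught by the ddt_sync_entry()
 *					notification described above
 *	== 1		== 1		scrubbed once, top-down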
2421 */ 2422static void 2423dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) 2424{ 2425 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; 2426 ddt_entry_t dde = { 0 }; 2427 int error; 2428 uint64_t n = 0; 2429 2430 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { 2431 ddt_t *ddt; 2432 2433 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) 2434 break; 2435 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", 2436 (longlong_t)ddb->ddb_class, 2437 (longlong_t)ddb->ddb_type, 2438 (longlong_t)ddb->ddb_checksum, 2439 (longlong_t)ddb->ddb_cursor); 2440 2441 /* There should be no pending changes to the dedup table */ 2442 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; 2443 ASSERT(avl_first(&ddt->ddt_tree) == NULL); 2444 2445 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); 2446 n++; 2447 2448 if (dsl_scan_check_suspend(scn, NULL)) 2449 break; 2450 } 2451 2452 zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; " 2453 "suspending=%u", (longlong_t)n, 2454 (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); 2455 2456 ASSERT(error == 0 || error == ENOENT); 2457 ASSERT(error != ENOENT || 2458 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); 2459} 2460 2461static uint64_t 2462dsl_scan_ds_maxtxg(dsl_dataset_t *ds) 2463{ 2464 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; 2465 if (ds->ds_is_snapshot) 2466 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); 2467 return (smt); 2468} 2469 2470static void 2471dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) 2472{ 2473 scan_ds_t *sds; 2474 dsl_pool_t *dp = scn->scn_dp; 2475 2476 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= 2477 scn->scn_phys.scn_ddt_class_max) { 2478 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2479 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2480 dsl_scan_ddt(scn, tx); 2481 if (scn->scn_suspending) 2482 return; 2483 } 2484 2485 if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { 2486 /* First do the MOS & ORIGIN */ 2487 2488 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2489 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2490 dsl_scan_visit_rootbp(scn, NULL, 2491 &dp->dp_meta_rootbp, tx); 2492 spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); 2493 if (scn->scn_suspending) 2494 return; 2495 2496 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { 2497 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2498 enqueue_cb, NULL, DS_FIND_CHILDREN)); 2499 } else { 2500 dsl_scan_visitds(scn, 2501 dp->dp_origin_snap->ds_object, tx); 2502 } 2503 ASSERT(!scn->scn_suspending); 2504 } else if (scn->scn_phys.scn_bookmark.zb_objset != 2505 ZB_DESTROYED_OBJSET) { 2506 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; 2507 /* 2508 * If we were suspended, continue from here. Note if the 2509 * ds we were suspended on was deleted, the zb_objset may 2510 * be -1, so we will skip this and find a new objset 2511 * below. 2512 */ 2513 dsl_scan_visitds(scn, dsobj, tx); 2514 if (scn->scn_suspending) 2515 return; 2516 } 2517 2518 /* 2519 * In case we suspended right at the end of the ds, zero the 2520 * bookmark so we don't think that we're still trying to resume. 2521 */ 2522 bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t)); 2523 2524 /* 2525 * Keep pulling things out of the dataset avl queue. Updates to the 2526 * persistent zap-object-as-queue happen only at checkpoints. 
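	 *
	 * A sketch of the txg window set up for each dataset below
	 * (numbers assumed for illustration): for a full scrub with
	 * scn_min_txg == 0, a dataset whose queue entry carries txg 50
	 * is visited with cur_min_txg == MAX(0, 50) == 50, so only
	 * blocks born after txg 50 are read; older shared blocks were
	 * already covered by an earlier snapshot's pass.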
2527 */ 2528 while ((sds = avl_first(&scn->scn_queue)) != NULL) { 2529 dsl_dataset_t *ds; 2530 uint64_t dsobj = sds->sds_dsobj; 2531 uint64_t txg = sds->sds_txg; 2532 2533 /* dequeue and free the ds from the queue */ 2534 scan_ds_queue_remove(scn, dsobj); 2535 sds = NULL; /* must not be touched after removal */ 2536 2537 /* Set up min / max txg */ 2538 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2539 if (txg != 0) { 2540 scn->scn_phys.scn_cur_min_txg = 2541 MAX(scn->scn_phys.scn_min_txg, txg); 2542 } else { 2543 scn->scn_phys.scn_cur_min_txg = 2544 MAX(scn->scn_phys.scn_min_txg, 2545 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2546 } 2547 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); 2548 dsl_dataset_rele(ds, FTAG); 2549 2550 dsl_scan_visitds(scn, dsobj, tx); 2551 if (scn->scn_suspending) 2552 return; 2553 } 2554 /* No more objsets to fetch, we're done */ 2555 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; 2556 ASSERT0(scn->scn_suspending); 2557} 2558 2559static uint64_t 2560dsl_scan_count_leaves(vdev_t *vd) 2561{ 2562 uint64_t i, leaves = 0; 2563 2564 /* we only count leaves that belong to the main pool and are readable */ 2565 if (vd->vdev_islog || vd->vdev_isspare || 2566 vd->vdev_isl2cache || !vdev_readable(vd)) 2567 return (0); 2568 2569 if (vd->vdev_ops->vdev_op_leaf) 2570 return (1); 2571 2572 for (i = 0; i < vd->vdev_children; i++) { 2573 leaves += dsl_scan_count_leaves(vd->vdev_child[i]); 2574 } 2575 2576 return (leaves); 2577} 2578 2579 2580static void 2581scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) 2582{ 2583 int i; 2584 uint64_t cur_size = 0; 2585 2586 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 2587 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); 2588 } 2589 2590 q->q_total_zio_size_this_txg += cur_size; 2591 q->q_zios_this_txg++; 2592} 2593 2594static void 2595scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, 2596 uint64_t end) 2597{ 2598 q->q_total_seg_size_this_txg += end - start; 2599 q->q_segs_this_txg++; 2600} 2601 2602static boolean_t 2603scan_io_queue_check_suspend(dsl_scan_t *scn) 2604{ 2605 /* See comment in dsl_scan_check_suspend() */ 2606 uint64_t curr_time_ns = gethrtime(); 2607 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 2608 uint64_t sync_time_ns = curr_time_ns - 2609 scn->scn_dp->dp_spa->spa_sync_starttime; 2610 int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; 2611 int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 2612 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 2613 2614 return ((NSEC2MSEC(scan_time_ns) > mintime && 2615 (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent || 2616 txg_sync_waiting(scn->scn_dp) || 2617 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 2618 spa_shutting_down(scn->scn_dp->dp_spa)); 2619} 2620 2621/* 2622 * Given a list of scan_io_t's in io_list, this issues the io's out to 2623 * disk. This consumes the io_list and frees the scan_io_t's. This is 2624 * called when emptying queues, either when we're up against the memory 2625 * limit or when we have finished scanning. Returns B_TRUE if we stopped 2626 * processing the list before we finished. Any zios that were not issued 2627 * will remain in the io_list. 
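 *
 * The caller (see scan_io_queues_run_one()) reacts to a B_TRUE return
 * by requeueing whatever remains on io_list via
 * scan_io_queue_insert_impl() instead of issuing further.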
2628 */ 2629static boolean_t 2630scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) 2631{ 2632 dsl_scan_t *scn = queue->q_scn; 2633 scan_io_t *sio; 2634 int64_t bytes_issued = 0; 2635 boolean_t suspended = B_FALSE; 2636 2637 while ((sio = list_head(io_list)) != NULL) { 2638 blkptr_t bp; 2639 2640 if (scan_io_queue_check_suspend(scn)) { 2641 suspended = B_TRUE; 2642 break; 2643 } 2644 2645 sio2bp(sio, &bp, queue->q_vd->vdev_id); 2646 bytes_issued += sio->sio_asize; 2647 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, 2648 &sio->sio_zb, queue); 2649 (void) list_remove_head(io_list); 2650 scan_io_queues_update_zio_stats(queue, &bp); 2651 kmem_free(sio, sizeof (*sio)); 2652 } 2653 2654 atomic_add_64(&scn->scn_bytes_pending, -bytes_issued); 2655 2656 return (suspended); 2657} 2658 2659/* 2660 * Given a range_seg_t (extent) and a list, this function passes over a 2661 * scan queue and gathers up the appropriate ios which fit into that 2662 * scan seg (starting from lowest LBA). At the end, we remove the segment 2663 * from the q_exts_by_addr range tree. 2664 */ 2665static boolean_t 2666scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) 2667{ 2668 scan_io_t srch_sio, *sio, *next_sio; 2669 avl_index_t idx; 2670 uint_t num_sios = 0; 2671 int64_t bytes_issued = 0; 2672 2673 ASSERT(rs != NULL); 2674 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2675 2676 srch_sio.sio_offset = rs->rs_start; 2677 2678 /* 2679 * The exact start of the extent might not contain any matching zios, 2680 * so if that's the case, examine the next one in the tree. 2681 */ 2682 sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx); 2683 if (sio == NULL) 2684 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); 2685 2686 while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios <= 32) { 2687 ASSERT3U(sio->sio_offset, >=, rs->rs_start); 2688 ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end); 2689 2690 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); 2691 avl_remove(&queue->q_sios_by_addr, sio); 2692 2693 bytes_issued += sio->sio_asize; 2694 num_sios++; 2695 list_insert_tail(list, sio); 2696 sio = next_sio; 2697 } 2698 2699 /* 2700 * We limit the number of sios we process at once to 32 to avoid 2701 * biting off more than we can chew. If we didn't take everything 2702 * in the segment we update it to reflect the work we were able to 2703 * complete. Otherwise, we remove it from the range tree entirely. 2704 */ 2705 if (sio != NULL && sio->sio_offset < rs->rs_end) { 2706 range_tree_adjust_fill(queue->q_exts_by_addr, rs, 2707 -bytes_issued); 2708 range_tree_resize_segment(queue->q_exts_by_addr, rs, 2709 sio->sio_offset, rs->rs_end - sio->sio_offset); 2710 2711 return (B_TRUE); 2712 } else { 2713 range_tree_remove(queue->q_exts_by_addr, rs->rs_start, 2714 rs->rs_end - rs->rs_start); 2715 return (B_FALSE); 2716 } 2717} 2718 2719 2720/* 2721 * This is called from the queue emptying thread and selects the next 2722 * extent from which we are to issue io's. The behavior of this function 2723 * depends on the state of the scan, the current memory consumption and 2724 * whether or not we are performing a scan shutdown. 2725 * 1) We select extents in an elevator algorithm (LBA-order) if the scan 2726 * needs to perform a checkpoint 2727 * 2) We select the largest available extent if we are up against the 2728 * memory limit. 2729 * 3) Otherwise we don't select any extents. 
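 *
 * In tabular form (leaving aside the zfs_scan_issue_strategy override
 * handled at the top of the function):
 *
 *	scan state	extent selected
 *	-------------	-----------------------------------
 *	checkpointing	lowest LBA (range_tree_first())
 *	clearing	largest (avl_first(q_exts_by_size))
 *	neither		NULL (nothing issued this txg)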
2730 */ 2731static const range_seg_t * 2732scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) 2733{ 2734 dsl_scan_t *scn = queue->q_scn; 2735 2736 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2737 ASSERT(scn->scn_is_sorted); 2738 2739 /* handle tunable overrides */ 2740 if (scn->scn_checkpointing || scn->scn_clearing) { 2741 if (zfs_scan_issue_strategy == 1) { 2742 return (range_tree_first(queue->q_exts_by_addr)); 2743 } else if (zfs_scan_issue_strategy == 2) { 2744 return (avl_first(&queue->q_exts_by_size)); 2745 } 2746 } 2747 2748 /* 2749 * During normal clearing, we want to issue our largest segments 2750 * first, keeping IO as sequential as possible, and leaving the 2751 * smaller extents for later with the hope that they might eventually 2752 * grow to larger sequential segments. However, when the scan is 2753 * checkpointing, no new extents will be added to the sorting queue, 2754 * so the way we are sorted now is as good as it will ever get. 2755 * In this case, we instead switch to issuing extents in LBA order. 2756 */ 2757 if (scn->scn_checkpointing) { 2758 return (range_tree_first(queue->q_exts_by_addr)); 2759 } else if (scn->scn_clearing) { 2760 return (avl_first(&queue->q_exts_by_size)); 2761 } else { 2762 return (NULL); 2763 } 2764} 2765 2766static void 2767scan_io_queues_run_one(void *arg) 2768{ 2769 dsl_scan_io_queue_t *queue = arg; 2770 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 2771 boolean_t suspended = B_FALSE; 2772 range_seg_t *rs = NULL; 2773 scan_io_t *sio = NULL; 2774 list_t sio_list; 2775 uint64_t bytes_per_leaf = zfs_scan_vdev_limit; 2776 uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd); 2777 2778 ASSERT(queue->q_scn->scn_is_sorted); 2779 2780 list_create(&sio_list, sizeof (scan_io_t), 2781 offsetof(scan_io_t, sio_nodes.sio_list_node)); 2782 mutex_enter(q_lock); 2783 2784 /* calculate maximum in-flight bytes for this txg (min 1MB) */ 2785 queue->q_maxinflight_bytes = 2786 MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); 2787 2788 /* reset per-queue scan statistics for this txg */ 2789 queue->q_total_seg_size_this_txg = 0; 2790 queue->q_segs_this_txg = 0; 2791 queue->q_total_zio_size_this_txg = 0; 2792 queue->q_zios_this_txg = 0; 2793 2794 /* loop until we have run out of time or sios */ 2795 while ((rs = (range_seg_t*)scan_io_queue_fetch_ext(queue)) != NULL) { 2796 uint64_t seg_start = 0, seg_end = 0; 2797 boolean_t more_left = B_TRUE; 2798 2799 ASSERT(list_is_empty(&sio_list)); 2800 2801 /* loop while we still have sios left to process in this rs */ 2802 while (more_left) { 2803 scan_io_t *first_sio, *last_sio; 2804 2805 /* 2806 * We have selected which extent needs to be 2807 * processed next. Gather up the corresponding sios. 2808 */ 2809 more_left = scan_io_queue_gather(queue, rs, &sio_list); 2810 ASSERT(!list_is_empty(&sio_list)); 2811 first_sio = list_head(&sio_list); 2812 last_sio = list_tail(&sio_list); 2813 2814 seg_end = last_sio->sio_offset + last_sio->sio_asize; 2815 if (seg_start == 0) 2816 seg_start = first_sio->sio_offset; 2817 2818 /* 2819 * Issuing sios can take a long time so drop the 2820 * queue lock. The sio queue won't be updated by 2821 * other threads since we're in syncing context so 2822 * we can be sure that our trees will remain exactly 2823 * as we left them. 
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		}
		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_head(&sio_list)) != NULL) {
		list_remove(&sio_list, sio);
		scan_io_queue_insert_impl(queue, sio);
	}

	mutex_exit(q_lock);
	list_destroy(&sio_list);
}

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's io's only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_bytes_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		char *tq_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN + 16,
		    KM_SLEEP);
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		(void) snprintf(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16,
		    "dsl_scan_tq_%s", spa->spa_name);
		scn->scn_taskq = taskq_create(tq_name, nthreads, minclsyspri,
		    nthreads, nthreads, TASKQ_PREPOPULATE);
		kmem_free(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
2907 */ 2908 taskq_wait(scn->scn_taskq); 2909} 2910 2911static boolean_t 2912dsl_scan_async_block_should_pause(dsl_scan_t *scn) 2913{ 2914 uint64_t elapsed_nanosecs; 2915 2916 if (zfs_recover) 2917 return (B_FALSE); 2918 2919 if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks) 2920 return (B_TRUE); 2921 2922 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; 2923 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || 2924 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && 2925 txg_sync_waiting(scn->scn_dp)) || 2926 spa_shutting_down(scn->scn_dp->dp_spa)); 2927} 2928 2929static int 2930dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2931{ 2932 dsl_scan_t *scn = arg; 2933 2934 if (!scn->scn_is_bptree || 2935 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { 2936 if (dsl_scan_async_block_should_pause(scn)) 2937 return (SET_ERROR(ERESTART)); 2938 } 2939 2940 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, 2941 dmu_tx_get_txg(tx), bp, BP_GET_PSIZE(bp), 0)); 2942 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2943 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), 2944 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2945 scn->scn_visited_this_txg++; 2946 return (0); 2947} 2948 2949static void 2950dsl_scan_update_stats(dsl_scan_t *scn) 2951{ 2952 spa_t *spa = scn->scn_dp->dp_spa; 2953 uint64_t i; 2954 uint64_t seg_size_total = 0, zio_size_total = 0; 2955 uint64_t seg_count_total = 0, zio_count_total = 0; 2956 2957 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 2958 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 2959 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; 2960 2961 if (queue == NULL) 2962 continue; 2963 2964 seg_size_total += queue->q_total_seg_size_this_txg; 2965 zio_size_total += queue->q_total_zio_size_this_txg; 2966 seg_count_total += queue->q_segs_this_txg; 2967 zio_count_total += queue->q_zios_this_txg; 2968 } 2969 2970 if (seg_count_total == 0 || zio_count_total == 0) { 2971 scn->scn_avg_seg_size_this_txg = 0; 2972 scn->scn_avg_zio_size_this_txg = 0; 2973 scn->scn_segs_this_txg = 0; 2974 scn->scn_zios_this_txg = 0; 2975 return; 2976 } 2977 2978 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; 2979 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; 2980 scn->scn_segs_this_txg = seg_count_total; 2981 scn->scn_zios_this_txg = zio_count_total; 2982} 2983 2984static int 2985dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2986{ 2987 dsl_scan_t *scn = arg; 2988 const dva_t *dva = &bp->blk_dva[0]; 2989 2990 if (dsl_scan_async_block_should_pause(scn)) 2991 return (SET_ERROR(ERESTART)); 2992 2993 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, 2994 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), 2995 DVA_GET_ASIZE(dva), tx); 2996 scn->scn_visited_this_txg++; 2997 return (0); 2998} 2999 3000boolean_t 3001dsl_scan_active(dsl_scan_t *scn) 3002{ 3003 spa_t *spa = scn->scn_dp->dp_spa; 3004 uint64_t used = 0, comp, uncomp; 3005 3006 if (spa->spa_load_state != SPA_LOAD_NONE) 3007 return (B_FALSE); 3008 if (spa_shutting_down(spa)) 3009 return (B_FALSE); 3010 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || 3011 (scn->scn_async_destroying && !scn->scn_async_stalled)) 3012 return (B_TRUE); 3013 3014 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { 3015 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, 3016 &used, &comp, &uncomp); 3017 } 3018 return (used != 0); 3019} 3020 3021static boolean_t 
3022dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, 3023 uint64_t phys_birth) 3024{ 3025 vdev_t *vd; 3026 3027 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 3028 3029 if (vd->vdev_ops == &vdev_indirect_ops) { 3030 /* 3031 * The indirect vdev can point to multiple 3032 * vdevs. For simplicity, always create 3033 * the resilver zio_t. zio_vdev_io_start() 3034 * will bypass the child resilver i/o's if 3035 * they are on vdevs that don't have DTL's. 3036 */ 3037 return (B_TRUE); 3038 } 3039 3040 if (DVA_GET_GANG(dva)) { 3041 /* 3042 * Gang members may be spread across multiple 3043 * vdevs, so the best estimate we have is the 3044 * scrub range, which has already been checked. 3045 * XXX -- it would be better to change our 3046 * allocation policy to ensure that all 3047 * gang members reside on the same vdev. 3048 */ 3049 return (B_TRUE); 3050 } 3051 3052 /* 3053 * Check if the txg falls within the range which must be 3054 * resilvered. DVAs outside this range can always be skipped. 3055 */ 3056 if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)) 3057 return (B_FALSE); 3058 3059 /* 3060 * Check if the top-level vdev must resilver this offset. 3061 * When the offset does not intersect with a dirty leaf DTL 3062 * then it may be possible to skip the resilver IO. The psize 3063 * is provided instead of asize to simplify the check for RAIDZ. 3064 */ 3065 if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize)) 3066 return (B_FALSE); 3067 3068 return (B_TRUE); 3069} 3070 3071static int 3072dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) 3073{ 3074 int err = 0; 3075 dsl_scan_t *scn = dp->dp_scan; 3076 spa_t *spa = dp->dp_spa; 3077 3078 if (spa_suspend_async_destroy(spa)) 3079 return (0); 3080 3081 if (zfs_free_bpobj_enabled && 3082 spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3083 scn->scn_is_bptree = B_FALSE; 3084 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; 3085 scn->scn_zio_root = zio_root(spa, NULL, 3086 NULL, ZIO_FLAG_MUSTSUCCEED); 3087 err = bpobj_iterate(&dp->dp_free_bpobj, 3088 dsl_scan_free_block_cb, scn, tx); 3089 VERIFY0(zio_wait(scn->scn_zio_root)); 3090 scn->scn_zio_root = NULL; 3091 3092 if (err != 0 && err != ERESTART) 3093 zfs_panic_recover("error %u from bpobj_iterate()", err); 3094 } 3095 3096 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3097 ASSERT(scn->scn_async_destroying); 3098 scn->scn_is_bptree = B_TRUE; 3099 scn->scn_zio_root = zio_root(spa, NULL, 3100 NULL, ZIO_FLAG_MUSTSUCCEED); 3101 err = bptree_iterate(dp->dp_meta_objset, 3102 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); 3103 VERIFY0(zio_wait(scn->scn_zio_root)); 3104 scn->scn_zio_root = NULL; 3105 3106 if (err == EIO || err == ECKSUM) { 3107 err = 0; 3108 } else if (err != 0 && err != ERESTART) { 3109 zfs_panic_recover("error %u from " 3110 "traverse_dataset_destroyed()", err); 3111 } 3112 3113 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { 3114 /* finished; deactivate async destroy feature */ 3115 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); 3116 ASSERT(!spa_feature_is_active(spa, 3117 SPA_FEATURE_ASYNC_DESTROY)); 3118 VERIFY0(zap_remove(dp->dp_meta_objset, 3119 DMU_POOL_DIRECTORY_OBJECT, 3120 DMU_POOL_BPTREE_OBJ, tx)); 3121 VERIFY0(bptree_free(dp->dp_meta_objset, 3122 dp->dp_bptree_obj, tx)); 3123 dp->dp_bptree_obj = 0; 3124 scn->scn_async_destroying = B_FALSE; 3125 scn->scn_async_stalled = B_FALSE; 3126 } else { 3127 /* 3128 * If we didn't make progress, mark the async 3129 * destroy as stalled, so 
that we will not initiate
			 * a spa_sync() on its behalf. Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%d",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed. This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}

	return (0);
}

/*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
 * can guarantee that blocks we are currently scanning will not change out
 * from under us.
 * While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
 */
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;
	state_sync_type_t sync_type = SYNC_OPTIONAL;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (dsl_scan_restarting(scn, tx)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, (longlong_t)tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(dp->dp_spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	/* reset scan statistics */
	scn->scn_visited_this_txg = 0;
	scn->scn_holes_this_txg = 0;
	scn->scn_lt_min_this_txg = 0;
	scn->scn_gt_max_this_txg = 0;
	scn->scn_ddt_contained_this_txg = 0;
	scn->scn_objsets_visited_this_txg = 0;
	scn->scn_avg_seg_size_this_txg = 0;
	scn->scn_segs_this_txg = 0;
	scn->scn_avg_zio_size_this_txg = 0;
	scn->scn_zios_this_txg = 0;
	scn->scn_suspending = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys. If we pause, don't do
	 * any scrubbing or resilvering. This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it. It is also faster to free the
	 * blocks than to scrub them.
	 */
	err = dsl_process_async_destroys(dp, tx);
	if (err != 0)
		return;

	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
		return;

	/*
	 * Wait a few txgs after importing to begin scanning so that
	 * we can get the pool imported quickly.
	 */
	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
		return;

	/*
	 * It is possible to switch from unsorted to sorted at any time,
	 * but afterwards the scan will remain sorted unless reloaded from
	 * a checkpoint after a reboot.
	 */
	if (!zfs_scan_legacy) {
		scn->scn_is_sorted = B_TRUE;
		if (scn->scn_last_checkpoint == 0)
			scn->scn_last_checkpoint = ddi_get_lbolt();
	}

	/*
	 * For sorted scans, determine what kind of work we will be doing
	 * this txg based on our memory limitations and whether or not we
	 * need to perform a checkpoint.
	 */
	if (scn->scn_is_sorted) {
		/*
		 * If we are over our checkpoint interval, set scn_clearing
		 * so that we can begin checkpointing immediately. The
		 * checkpoint allows us to save a consistent bookmark
		 * representing how much data we have scrubbed so far.
		 * Otherwise, use the memory limit to determine if we should
		 * scan for metadata or start issuing scrub IOs. We accumulate
		 * metadata until we hit our hard memory limit, at which point
		 * we issue scrub IOs until we are at our soft memory limit.
		 */
		if (scn->scn_checkpointing ||
		    ddi_get_lbolt() - scn->scn_last_checkpoint >
		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
			if (!scn->scn_checkpointing)
				zfs_dbgmsg("begin scan checkpoint");

			scn->scn_checkpointing = B_TRUE;
			scn->scn_clearing = B_TRUE;
		} else {
			boolean_t should_clear = dsl_scan_should_clear(scn);
			if (should_clear && !scn->scn_clearing) {
				zfs_dbgmsg("begin scan clearing");
				scn->scn_clearing = B_TRUE;
			} else if (!should_clear && scn->scn_clearing) {
				zfs_dbgmsg("finish scan clearing");
				scn->scn_clearing = B_FALSE;
			}
		}
	} else {
		ASSERT0(scn->scn_checkpointing);
		ASSERT0(scn->scn_clearing);
	}

	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
		/* Need to scan metadata for more blocks to scrub */
		dsl_scan_phys_t *scnp = &scn->scn_phys;
		taskqid_t prefetch_tqid;
		uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
		uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev);

		/*
		 * Recalculate the max number of in-flight bytes for pool-wide
		 * scanning operations (minimum 1MB). Limits for the issuing
		 * phase are done per top-level vdev and are handled separately.
		 */
		scn->scn_maxinflight_bytes =
		    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);

		if (scnp->scn_ddt_bookmark.ddb_class <=
		    scnp->scn_ddt_class_max) {
			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
			zfs_dbgmsg("doing scan sync txg %llu; "
			    "ddt bm=%llu/%llu/%llu/%llx",
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		} else {
			zfs_dbgmsg("doing scan sync txg %llu; "
			    "bm=%llu/%llu/%llu/%llu",
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_bookmark.zb_objset,
			    (longlong_t)scnp->scn_bookmark.zb_object,
			    (longlong_t)scnp->scn_bookmark.zb_level,
			    (longlong_t)scnp->scn_bookmark.zb_blkid);
		}

		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);

		scn->scn_prefetch_stop = B_FALSE;
		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
		ASSERT(prefetch_tqid != TASKQID_INVALID);

		dsl_pool_config_enter(dp, FTAG);
		dsl_scan_visit(scn, tx);
		dsl_pool_config_exit(dp, FTAG);

		mutex_enter(&dp->dp_spa->spa_scrub_lock);
		scn->scn_prefetch_stop = B_TRUE;
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&dp->dp_spa->spa_scrub_lock);

		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		zfs_dbgmsg("scan visited %llu blocks in %llums "
		    "(%llu os's, %llu holes, %llu < mintxg, "
		    "%llu in ddt, %llu > maxtxg)",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_objsets_visited_this_txg,
		    (longlong_t)scn->scn_holes_this_txg,
		    (longlong_t)scn->scn_lt_min_this_txg,
		    (longlong_t)scn->scn_ddt_contained_this_txg,
		    (longlong_t)scn->scn_gt_max_this_txg);

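	/*
	 * The branches below finish out this txg's scan work: if the
	 * traversal above completed without suspending, scn_done_txg is
	 * set to the next txg and, for sorted scans, checkpointing is
	 * enabled so that later passes through this function drain the
	 * sorted queues before the scan is finally marked complete.
	 */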
3418 if (!scn->scn_suspending) { 3419 ASSERT0(avl_numnodes(&scn->scn_queue)); 3420 scn->scn_done_txg = tx->tx_txg + 1; 3421 if (scn->scn_is_sorted) { 3422 scn->scn_checkpointing = B_TRUE; 3423 scn->scn_clearing = B_TRUE; 3424 } 3425 zfs_dbgmsg("scan complete txg %llu", 3426 (longlong_t)tx->tx_txg); 3427 } 3428 } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) { 3429 ASSERT(scn->scn_clearing); 3430 3431 /* need to issue scrubbing IOs from per-vdev queues */ 3432 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3433 NULL, ZIO_FLAG_CANFAIL); 3434 scan_io_queues_run(scn); 3435 (void) zio_wait(scn->scn_zio_root); 3436 scn->scn_zio_root = NULL; 3437 3438 /* calculate and dprintf the current memory usage */ 3439 (void) dsl_scan_should_clear(scn); 3440 dsl_scan_update_stats(scn); 3441 3442 zfs_dbgmsg("scrubbed %llu blocks (%llu segs) in %llums " 3443 "(avg_block_size = %llu, avg_seg_size = %llu)", 3444 (longlong_t)scn->scn_zios_this_txg, 3445 (longlong_t)scn->scn_segs_this_txg, 3446 (longlong_t)NSEC2MSEC(gethrtime() - 3447 scn->scn_sync_start_time), 3448 (longlong_t)scn->scn_avg_zio_size_this_txg, 3449 (longlong_t)scn->scn_avg_seg_size_this_txg); 3450 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { 3451 /* Finished with everything. Mark the scrub as complete */ 3452 zfs_dbgmsg("scan issuing complete txg %llu", 3453 (longlong_t)tx->tx_txg); 3454 ASSERT3U(scn->scn_done_txg, !=, 0); 3455 ASSERT0(spa->spa_scrub_inflight); 3456 ASSERT0(scn->scn_bytes_pending); 3457 dsl_scan_done(scn, B_TRUE, tx); 3458 sync_type = SYNC_MANDATORY; 3459 } 3460 3461 dsl_scan_sync_state(scn, tx, sync_type); 3462} 3463 3464static void 3465count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) 3466{ 3467 int i; 3468 3469 /* update the spa's stats on how many bytes we have issued */ 3470 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 3471 atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued, 3472 DVA_GET_ASIZE(&bp->blk_dva[i])); 3473 } 3474 3475 /* 3476 * If we resume after a reboot, zab will be NULL; don't record 3477 * incomplete stats in that case. 3478 */ 3479 if (zab == NULL) 3480 return; 3481 3482 mutex_enter(&zab->zab_lock); 3483 3484 for (i = 0; i < 4; i++) { 3485 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; 3486 int t = (i & 1) ? 
	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

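		/*
		 * Ditto accounting below: "equal" counts how many of the
		 * pairwise DVA comparisons landed on the same vdev. For
		 * example, three DVAs on vdevs (1, 1, 2) give equal == 1
		 * (counted as 2-of-3 on one vdev), while (1, 1, 1) gives
		 * equal == 3 (3-of-3). equal == 0 means every copy is on
		 * a distinct vdev; equal == 2 is impossible.
		 */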
		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}

	mutex_exit(&zab->zab_lock);
}

static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
	avl_index_t idx;
	int64_t asize = sio->sio_asize;
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
		/* block is already scheduled for reading */
		atomic_add_64(&scn->scn_bytes_pending, -asize);
		kmem_free(sio, sizeof (*sio));
		return;
	}
	avl_insert(&queue->q_sios_by_addr, sio, idx);
	range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize);
}

/*
 * Given all the info we got from our metadata scanning process, we
 * construct a scan_io_t and insert it into the scan sorting queue. The
 * I/O must already be suitable for us to process (i.e. not a gang block);
 * that is ensured by dsl_scan_enqueue().
 */
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
    int zio_flags, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio = kmem_zalloc(sizeof (*sio), KM_SLEEP);

	ASSERT0(BP_IS_GANG(bp));
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	bp2sio(bp, sio, dva_i);
	sio->sio_flags = zio_flags;
	sio->sio_zb = *zb;

	/*
	 * Increment the bytes pending counter now so that we can't
	 * get an integer underflow in case the worker processes the
	 * zio before we get to incrementing this counter.
	 */
	atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize);

	scan_io_queue_insert_impl(queue, sio);
}

/*
 * Given a set of I/O parameters as discovered by the metadata traversal
 * process, attempts to place the I/O into the sorted queues (if allowed),
 * or immediately executes the I/O.
 */
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb)
{
	spa_t *spa = dp->dp_spa;

	ASSERT(!BP_IS_EMBEDDED(bp));

	/*
	 * Gang blocks are hard to issue sequentially, so we just issue them
	 * here immediately instead of queuing them.
	 */
	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
		scan_exec_io(dp, bp, zio_flags, zb, NULL);
		return;
	}
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		dva_t dva;
		vdev_t *vdev;

		dva = bp->blk_dva[i];
		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
		ASSERT(vdev != NULL);

		mutex_enter(&vdev->vdev_scan_io_queue_lock);
		if (vdev->vdev_scan_io_queue == NULL)
			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
		ASSERT(dp->dp_scan != NULL);
		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
		    i, zio_flags, zb);
		mutex_exit(&vdev->vdev_scan_io_queue_lock);
	}
}

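/*
 * Scan callback invoked for every block pointer the metadata traversal
 * discovers. Blocks born at or before scn_min_txg, or at or after
 * scn_max_txg, fall outside the scan window and are only counted.
 * Everything else is either enqueued (or issued directly) for scrubbing,
 * or, for resilvers, first checked per-DVA for whether it actually
 * needs repair.
 */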
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	size_t psize = BP_GET_PSIZE(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int d;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg) {
		count_block(scn, dp->dp_blkstats, bp);
		return (0);
	}

	/* Embedded BP's have phys_birth==0, so we reject them above. */
	ASSERT(!BP_IS_EMBEDDED(bp));

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io)
			needs_io = dsl_scan_need_resilver(spa, dva, psize,
			    phys_birth);
	}

	if (needs_io && !zfs_no_scrub_io) {
		dsl_scan_enqueue(dp, bp, zio_flags, zb);
	} else {
		count_block(scn, dp->dp_blkstats, bp);
	}

	/* do not relocate this block */
	return (0);
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	dsl_scan_io_queue_t *queue = zio->io_private;

	abd_free(zio->io_abd);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
		cv_broadcast(&queue->q_zio_cv);
		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
	}

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
	}
}

/*
 * Given a scanning zio's information, executes the zio. The zio need not
 * necessarily be sortable; this function simply executes the zio, no matter
 * what it is. The optional queue argument allows the caller to request
 * per-top-level-vdev I/O rate limiting instead of the legacy global limiting.
 */
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	abd_t *data = abd_alloc_for_io(size, B_FALSE);
	unsigned int scan_delay = 0;

	ASSERT3U(scn->scn_maxinflight_bytes, >, 0);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;

		mutex_enter(q_lock);
		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
			cv_wait(&queue->q_zio_cv, q_lock);
		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
		mutex_exit(q_lock);
	}

	if (zio_flags & ZIO_FLAG_RESILVER)
		scan_delay = zfs_resilver_delay;
	else {
		ASSERT(zio_flags & ZIO_FLAG_SCRUB);
		scan_delay = zfs_scrub_delay;
	}

	if (scan_delay &&
	    (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle))
		delay(MAX((int)scan_delay, 0));

	count_block(dp->dp_scan, dp->dp_blkstats, bp);
	zio_nowait(zio_read(dp->dp_scan->scn_zio_root, spa, bp, data, size,
	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}

/*
 * This is the primary extent sorting algorithm. We balance two parameters:
 * 1) how many bytes of I/O are in an extent
 * 2) how well the extent is filled with I/O (as a fraction of its total size)
 * Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
 * The algorithm sorts based on a score calculated from the extent's size,
 * the relative fill volume (in %) and a "fill weight" parameter that controls
 * the split between whether we prefer larger extents or more well populated
 * extents:
 *
 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
 *
 * Example:
 * 1) assume extsz = 64 MiB
 * 2) assume fill = 32 MiB (extent is half full)
 * 3) assume fill_weight = 3
 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
 *    SCORE = 32M + (50 * 3 * 32M) / 100
 *    SCORE = 32M + (4800M / 100)
 *    SCORE = 32M + 48M
 *             ^     ^
 *             |     +--- final total relative fill-based score
 *             +--------- final total fill-based score
 *    SCORE = 80M
 *
 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
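 *
 * In 128ths, the example above becomes: fill fraction = (32M << 7) / 64M
 * = 64, so SCORE = 32M + ((64 * 3 * 32M) >> 7) = 32M + 48M = 80M, matching
 * the percentage-based arithmetic exactly.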
 */
static int
ext_size_compare(const void *x, const void *y)
{
	const range_seg_t *rsa = x, *rsb = y;
	uint64_t sa = rsa->rs_end - rsa->rs_start,
	    sb = rsb->rs_end - rsb->rs_start;
	uint64_t score_a, score_b;

	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
	    fill_weight * rsa->rs_fill) >> 7);
	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
	    fill_weight * rsb->rs_fill) >> 7);

	if (score_a > score_b)
		return (-1);
	if (score_a == score_b) {
		if (rsa->rs_start < rsb->rs_start)
			return (-1);
		if (rsa->rs_start == rsb->rs_start)
			return (0);
		return (1);
	}
	return (1);
}
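/*
 * Because higher scores sort first (a return of -1 means "a before b"),
 * the first node of the q_exts_by_size tree is always the most attractive
 * extent to issue next; ties are broken by ascending start offset to keep
 * the ordering total and deterministic.
 */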
/*
 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
 * in LBA order (from lowest to highest).
 */
static int
io_addr_compare(const void *x, const void *y)
{
	const scan_io_t *a = x, *b = y;

	if (a->sio_offset < b->sio_offset)
		return (-1);
	if (a->sio_offset == b->sio_offset)
		return (0);
	return (1);
}

/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);

	q->q_scn = scn;
	q->q_vd = vd;
	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
	q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
	    &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
	avl_create(&q->q_sios_by_addr, io_addr_compare,
	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));

	return (q);
}

/*
 * Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs; anything pending in the queue is
 * simply freed without being executed.
 */
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	void *cookie = NULL;
	int64_t bytes_dequeued = 0;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
	    NULL) {
		ASSERT(range_tree_contains(queue->q_exts_by_addr,
		    sio->sio_offset, sio->sio_asize));
		bytes_dequeued += sio->sio_asize;
		kmem_free(sio, sizeof (*sio));
	}

	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
	range_tree_destroy(queue->q_exts_by_addr);
	avl_destroy(&queue->q_sios_by_addr);
	cv_destroy(&queue->q_zio_cv);

	kmem_free(queue, sizeof (*queue));
}

/*
 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
 * called on behalf of vdev_top_transfer when creating or destroying
 * a mirror vdev due to zpool attach/detach.
 */
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
	mutex_enter(&svd->vdev_scan_io_queue_lock);
	mutex_enter(&tvd->vdev_scan_io_queue_lock);

	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
	svd->vdev_scan_io_queue = NULL;
	if (tvd->vdev_scan_io_queue != NULL)
		tvd->vdev_scan_io_queue->q_vd = tvd;

	mutex_exit(&tvd->vdev_scan_io_queue_lock);
	mutex_exit(&svd->vdev_scan_io_queue_lock);
}

static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		if (tvd->vdev_scan_io_queue != NULL)
			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
		tvd->vdev_scan_io_queue = NULL;
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}
}

static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;
	vdev_t *vdev;
	kmutex_t *q_lock;
	dsl_scan_io_queue_t *queue;
	scan_io_t srch, *sio;
	avl_index_t idx;
	uint64_t start, size;

	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
	ASSERT(vdev != NULL);
	q_lock = &vdev->vdev_scan_io_queue_lock;
	queue = vdev->vdev_scan_io_queue;

	mutex_enter(q_lock);
	if (queue == NULL) {
		mutex_exit(q_lock);
		return;
	}

	bp2sio(bp, &srch, dva_i);
	start = srch.sio_offset;
	size = srch.sio_asize;

	/*
	 * We can find the zio in two states:
	 * 1) Cold, just sitting in the queue of zio's to be issued at
	 *	some point in the future. In this case, all we do is
	 *	remove the zio from the q_sios_by_addr tree, decrement
	 *	its data volume from the containing range_seg_t and
	 *	resort the q_exts_by_size tree to reflect that the
	 *	range_seg_t has lost some of its 'fill'. We don't shorten
	 *	the range_seg_t - this is usually rare enough not to be
	 *	worth the extra hassle of trying to keep track of precise
	 *	extent boundaries.
	 * 2) Hot, where the zio is currently in-flight in
	 *	dsl_scan_issue_ios. In this case, we can't simply
	 *	reach in and stop the in-flight zio's, so we instead
	 *	block the caller. Eventually, dsl_scan_issue_ios will
	 *	be done with issuing the zio's it gathered and will
	 *	signal us.
	 */
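	/*
	 * srch is only a stack-allocated search key; on a match, avl_find()
	 * returns the heap-allocated scan_io_t that must actually be
	 * removed and freed.
	 */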
	sio = avl_find(&queue->q_sios_by_addr, &srch, &idx);
	if (sio != NULL) {
		int64_t asize = sio->sio_asize;
		blkptr_t tmpbp;

		/* Got it while it was cold in the queue */
		ASSERT3U(start, ==, sio->sio_offset);
		ASSERT3U(size, ==, asize);
		avl_remove(&queue->q_sios_by_addr, sio);

		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
		range_tree_remove_fill(queue->q_exts_by_addr, start, size);

		/*
		 * We only update scn_bytes_pending in the cold path,
		 * otherwise it will already have been accounted for as
		 * part of the zio's execution.
		 */
		atomic_add_64(&scn->scn_bytes_pending, -asize);

		/* count the block as though we issued it */
		sio2bp(sio, &tmpbp, dva_i);
		count_block(scn, dp->dp_blkstats, &tmpbp);

		kmem_free(sio, sizeof (*sio));
	}
	mutex_exit(q_lock);
}

/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent the zio from deallocating a particular portion
 * of disk space that could then be reallocated and written to while we
 * still have it queued up for processing.
 */
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(scn != NULL);
	if (!dsl_scan_is_running(scn))
		return;

	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
		dsl_scan_freed_dva(spa, bp, i);
}