zio.c revision 339106
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/trim_map.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
#if defined(__amd64__)
static int zio_use_uma = 1;
#else
static int zio_use_uma = 0;
#endif
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");

zio_trim_stats_t zio_trim_stats = {
	{ "bytes",		KSTAT_DATA_UINT64,
	    "Number of bytes successfully TRIMmed" },
	{ "success",		KSTAT_DATA_UINT64,
	    "Number of successful TRIM requests" },
	{ "unsupported",	KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed because TRIM is not supported" },
	{ "failed",		KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed for reasons other than not supported" },
};

static kstat_t *zio_trim_ksp;

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, dva_throttle_enabled, CTLFLAG_RDTUN,
    &zio_dva_throttle_enabled, 0, "");

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance.  Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
    &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
    &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
    &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");
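
/*
 * For illustration (editorial sketch, not original source text): with the
 * defaults above, spa_sync() defers frees starting in pass 2 (see
 * zio_free()), stops compressing starting in pass 5, and rewrites blocks
 * in place starting in pass 2 when the physical size is unchanged (see
 * zio_write_compress()).
 */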

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef illumos
#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (!zio_use_uma)
		goto out;

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		int cflags = zio_exclude_metadata ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifdef illumos
#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
#endif /* illumos */
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH | KMC_NODEBUG);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}
out:

	zio_inject_init();

	zio_trim_ksp = kstat_create("zfs", 0, "zio_trim", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof(zio_trim_stats) / sizeof(kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zio_trim_ksp != NULL) {
		zio_trim_ksp->ks_data = &zio_trim_stats;
		kstat_install(zio_trim_ksp);
	}
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	if (zio_trim_ksp != NULL) {
		kstat_delete(zio_trim_ksp);
		zio_trim_ksp = NULL;
	}
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP|flags));
}
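
/*
 * A minimal worked example (illustrative): the cache index is
 * c = (size - 1) >> SPA_MINBLOCKSHIFT, so with SPA_MINBLOCKSIZE == 512 a
 * 5120-byte request maps to c == 9, the cache created for
 * (9 + 1) << SPA_MINBLOCKSHIFT == 5120-byte buffers.  Indices that got no
 * dedicated cache were filled in by zio_init() with the next larger
 * cache, so every index is covered.
 */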

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists to limit the amount of ZFS
 * data that shows up in a kernel crashdump, reducing the amount of kernel
 * heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
#ifdef illumos
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));
#else
	IMPLY(zio->io_abd != NULL && abd_is_linear(zio->io_abd),
	    abd_is_linear(data));
#endif

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
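
/*
 * Typical use (sketch drawn from zio_read_bp_init() below): reading a
 * compressed block pushes a decompression transform,
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 *
 * so the device read fills the pushed buffer, and zio_pop_transforms()
 * later runs zio_decompress() into the original buffer and then frees
 * the temporary one (because zt_bufsize is nonzero).
 */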

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}
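
/*
 * Usage sketch: a pipeline stage that must wait for its children
 * typically begins with
 *
 *	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
 *	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY))
 *		return (ZIO_PIPELINE_STOP);
 *
 * as zio_write_compress() does below; zio_notify_parent() then
 * re-dispatches the stalled zio once the last outstanding child of a
 * watched type reaches the given wait state.
 */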

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		/*
		 * Dispatch the parent zio in its own taskq so that
		 * the child can continue to make progress.  This also
		 * prevents overflowing the stack when we have deeply nested
		 * parent-child relationships.
		 */
		zio_taskq_dispatch(pio, type, B_FALSE);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
668 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) 669 pipeline |= ZIO_GANG_STAGES; 670 } 671 672 zio->io_spa = spa; 673 zio->io_txg = txg; 674 zio->io_done = done; 675 zio->io_private = private; 676 zio->io_type = type; 677 zio->io_priority = priority; 678 zio->io_vd = vd; 679 zio->io_offset = offset; 680 zio->io_orig_abd = zio->io_abd = data; 681 zio->io_orig_size = zio->io_size = psize; 682 zio->io_lsize = lsize; 683 zio->io_orig_flags = zio->io_flags = flags; 684 zio->io_orig_stage = zio->io_stage = stage; 685 zio->io_orig_pipeline = zio->io_pipeline = pipeline; 686 zio->io_pipeline_trace = ZIO_STAGE_OPEN; 687 688 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); 689 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); 690 691 if (zb != NULL) 692 zio->io_bookmark = *zb; 693 694 if (pio != NULL) { 695 if (zio->io_logical == NULL) 696 zio->io_logical = pio->io_logical; 697 if (zio->io_child_type == ZIO_CHILD_GANG) 698 zio->io_gang_leader = pio->io_gang_leader; 699 zio_add_child(pio, zio); 700 } 701 702 return (zio); 703} 704 705static void 706zio_destroy(zio_t *zio) 707{ 708 metaslab_trace_fini(&zio->io_alloc_list); 709 list_destroy(&zio->io_parent_list); 710 list_destroy(&zio->io_child_list); 711 mutex_destroy(&zio->io_lock); 712 cv_destroy(&zio->io_cv); 713 kmem_cache_free(zio_cache, zio); 714} 715 716zio_t * 717zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, 718 void *private, enum zio_flag flags) 719{ 720 zio_t *zio; 721 722 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private, 723 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 724 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); 725 726 return (zio); 727} 728 729zio_t * 730zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) 731{ 732 return (zio_null(NULL, spa, NULL, done, private, flags)); 733} 734 735void 736zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp) 737{ 738 if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) { 739 zfs_panic_recover("blkptr at %p has invalid TYPE %llu", 740 bp, (longlong_t)BP_GET_TYPE(bp)); 741 } 742 if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS || 743 BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) { 744 zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu", 745 bp, (longlong_t)BP_GET_CHECKSUM(bp)); 746 } 747 if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS || 748 BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) { 749 zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu", 750 bp, (longlong_t)BP_GET_COMPRESS(bp)); 751 } 752 if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) { 753 zfs_panic_recover("blkptr at %p has invalid LSIZE %llu", 754 bp, (longlong_t)BP_GET_LSIZE(bp)); 755 } 756 if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) { 757 zfs_panic_recover("blkptr at %p has invalid PSIZE %llu", 758 bp, (longlong_t)BP_GET_PSIZE(bp)); 759 } 760 761 if (BP_IS_EMBEDDED(bp)) { 762 if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) { 763 zfs_panic_recover("blkptr at %p has invalid ETYPE %llu", 764 bp, (longlong_t)BPE_GET_ETYPE(bp)); 765 } 766 } 767 768 /* 769 * Do not verify individual DVAs if the config is not trusted. This 770 * will be done once the zio is executed in vdev_mirror_map_alloc. 771 */ 772 if (!spa->spa_trust_config) 773 return; 774 775 /* 776 * Pool-specific checks. 777 * 778 * Note: it would be nice to verify that the blk_birth and 779 * BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze() 780 * allows the birth time of log blocks (and dmu_sync()-ed blocks 781 * that are in the log) to be arbitrarily large. 

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted.  This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}
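
/*
 * For illustration: a DVA names a <vdev, offset, asize> triple, so
 * checking every copy of a block is roughly (sketch)
 *
 *	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
 *		if (!zfs_dva_valid(spa, &bp->blk_dva[d], bp))
 *			return (B_FALSE);
 *
 * zfs_blkptr_verify() above performs the full, panic-capable version of
 * this check.
 */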

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (zfs_trim_enabled || BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp,
		    BP_GET_PSIZE(bp), 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    uint64_t size, enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (zfs_trim_enabled)
		stage |= ZIO_STAGE_ISSUE_ASYNC | ZIO_STAGE_VDEV_IO_START |
		    ZIO_STAGE_VDEV_IO_ASSESS;
	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	else if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	flags |= ZIO_FLAG_DONT_QUEUE;

	zio = zio_create(pio, spa, txg, bp, NULL, size,
	    size, NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    offset, size, done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	/* Not all I/O types require the vdev I/O done stage, e.g. free. */
	if (!(pio->io_pipeline & ZIO_STAGE_VDEV_IO_DONE))
		pipeline &= ~ZIO_STAGE_VDEV_IO_DONE;

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		metaslab_class_t *mc = spa_normal_class(pio->io_spa);

		ASSERT(mc->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 0, 0,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
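
/*
 * Usage sketch (illustrative, after the pattern used by callers such as
 * the ZIL): flushing the write caches of all top-level vdevs:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++)
 *		zio_flush(rio, spa->spa_root_vdev->vdev_child[c]);
 *	(void) zio_wait(rio);
 *
 * Because zio_ioctl() recurses over interior vdevs, flushing a non-leaf
 * vdev fans out to every leaf beneath it.
 */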

zio_t *
zio_trim(zio_t *zio, spa_t *spa, vdev_t *vd, uint64_t offset, uint64_t size)
{
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	return (zio_create(zio, spa, 0, NULL, NULL, size, size, NULL, NULL,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_TRIM, ZIO_FLAG_DONT_AGGREGATE |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PHYS_PIPELINE));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
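
/*
 * For illustration: an embedded blkptr carries its (compressed) payload
 * inside the bp itself, so the branch above completes the read with no
 * device I/O at all -- decode_embedded_bp_compressed() copies the payload
 * into the zio's buffer and the pipeline collapses to
 * ZIO_INTERLOCK_PIPELINE; any decompression transform pushed earlier
 * still runs when the transforms are popped.
 */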

static int
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	int pass = 1;

	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round up compressed size to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
				abd_take_ownership_of_buf(cdata, B_TRUE);
				abd_zero_off(cdata, psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cdata,
				    psize, lsize, NULL);
			}
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	} else {
		ASSERT3U(psize, !=, 0);
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}
	return (ZIO_PIPELINE_CONTINUE);
}
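
/*
 * A worked example of the rounding above (illustrative): with
 * spa_min_ashift == 12 (4K sectors), a block that compresses from
 * lsize == 8192 to psize == 3100 is padded to
 * P2ROUNDUP(3100, 4096) == 4096 with a zeroed tail; had it compressed
 * only to 4200, rounding would yield 8192 >= lsize and the write would
 * go out uncompressed instead.
 */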

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
#if defined(illumos) || !defined(_KERNEL)
	ASSERT(zio->io_tqent.tqent_next == NULL);
#else
	ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO, thus jump to the end of this function and "skip" the
	 * delay; issuing it directly to the zio layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer.  This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			hrtime_t diff = zio->io_target_timestamp - now;

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			(void) timeout_generic(CALLOUT_NORMAL,
			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
		}

		return;
	}
#endif

	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	ASSERT3U(zio->io_queued_timestamp, >, 0);

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		zio->io_pipeline_trace |= zio->io_stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
	ASSERT3P(zio->io_executor, ==, NULL);

	zio->io_waiter = curthread;
	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}
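
/*
 * Usage sketch: synchronous callers block in zio_wait(), e.g.
 *
 *	error = zio_wait(zio_read(NULL, spa, bp, abd, size,
 *	    NULL, NULL, ZIO_PRIORITY_SYNC_READ, 0, &zb));
 *
 * while fire-and-forget callers use zio_nowait() below; a parentless
 * logical zio is then adopted by the spa's "Godfather" root zio so the
 * pool cannot be unloaded before it completes.
 */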
1811 */ 1812 spa_t *spa = zio->io_spa; 1813 1814 zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1815 } 1816 1817 ASSERT0(zio->io_queued_timestamp); 1818 zio->io_queued_timestamp = gethrtime(); 1819 zio_execute(zio); 1820} 1821 1822/* 1823 * ========================================================================== 1824 * Reexecute, cancel, or suspend/resume failed I/O 1825 * ========================================================================== 1826 */ 1827 1828static void 1829zio_reexecute(zio_t *pio) 1830{ 1831 zio_t *cio, *cio_next; 1832 1833 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1834 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1835 ASSERT(pio->io_gang_leader == NULL); 1836 ASSERT(pio->io_gang_tree == NULL); 1837 1838 pio->io_flags = pio->io_orig_flags; 1839 pio->io_stage = pio->io_orig_stage; 1840 pio->io_pipeline = pio->io_orig_pipeline; 1841 pio->io_reexecute = 0; 1842 pio->io_flags |= ZIO_FLAG_REEXECUTED; 1843 pio->io_pipeline_trace = 0; 1844 pio->io_error = 0; 1845 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1846 pio->io_state[w] = 0; 1847 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1848 pio->io_child_error[c] = 0; 1849 1850 if (IO_IS_ALLOCATING(pio)) 1851 BP_ZERO(pio->io_bp); 1852 1853 /* 1854 * As we reexecute pio's children, new children could be created. 1855 * New children go to the head of pio's io_child_list, however, 1856 * so we will (correctly) not reexecute them. The key is that 1857 * the remainder of pio's io_child_list, from 'cio_next' onward, 1858 * cannot be affected by any side effects of reexecuting 'cio'. 1859 */ 1860 zio_link_t *zl = NULL; 1861 mutex_enter(&pio->io_lock); 1862 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 1863 cio_next = zio_walk_children(pio, &zl); 1864 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1865 pio->io_children[cio->io_child_type][w]++; 1866 mutex_exit(&pio->io_lock); 1867 zio_reexecute(cio); 1868 mutex_enter(&pio->io_lock); 1869 } 1870 mutex_exit(&pio->io_lock); 1871 1872 /* 1873 * Now that all children have been reexecuted, execute the parent. 1874 * We don't reexecute "The Godfather" I/O here as it's the 1875 * responsibility of the caller to wait on it. 1876 */ 1877 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 1878 pio->io_queued_timestamp = gethrtime(); 1879 zio_execute(pio); 1880 } 1881} 1882 1883void 1884zio_suspend(spa_t *spa, zio_t *zio) 1885{ 1886 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1887 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1888 "failure and the failure mode property for this pool " 1889 "is set to panic.", spa_name(spa)); 1890 1891 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); 1892 1893 mutex_enter(&spa->spa_suspend_lock); 1894 1895 if (spa->spa_suspend_zio_root == NULL) 1896 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 1897 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 1898 ZIO_FLAG_GODFATHER); 1899 1900 spa->spa_suspended = B_TRUE; 1901 1902 if (zio != NULL) { 1903 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1904 ASSERT(zio != spa->spa_suspend_zio_root); 1905 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1906 ASSERT(zio_unique_parent(zio) == NULL); 1907 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1908 zio_add_child(spa->spa_suspend_zio_root, zio); 1909 } 1910 1911 mutex_exit(&spa->spa_suspend_lock); 1912} 1913 1914int 1915zio_resume(spa_t *spa) 1916{ 1917 zio_t *pio; 1918 1919 /* 1920 * Reexecute all previously suspended i/o. 
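 * We clear spa_suspended and wake any spa_suspend_cv waiters first,
 * then reexecute the godfather zio under which zio_suspend()
 * accumulated the failed I/Os, and wait for it to complete.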
1921 */ 1922 mutex_enter(&spa->spa_suspend_lock); 1923 spa->spa_suspended = B_FALSE; 1924 cv_broadcast(&spa->spa_suspend_cv); 1925 pio = spa->spa_suspend_zio_root; 1926 spa->spa_suspend_zio_root = NULL; 1927 mutex_exit(&spa->spa_suspend_lock); 1928 1929 if (pio == NULL) 1930 return (0); 1931 1932 zio_reexecute(pio); 1933 return (zio_wait(pio)); 1934} 1935 1936void 1937zio_resume_wait(spa_t *spa) 1938{ 1939 mutex_enter(&spa->spa_suspend_lock); 1940 while (spa_suspended(spa)) 1941 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1942 mutex_exit(&spa->spa_suspend_lock); 1943} 1944 1945/* 1946 * ========================================================================== 1947 * Gang blocks. 1948 * 1949 * A gang block is a collection of small blocks that looks to the DMU 1950 * like one large block. When zio_dva_allocate() cannot find a block 1951 * of the requested size, due to either severe fragmentation or the pool 1952 * being nearly full, it calls zio_write_gang_block() to construct the 1953 * block from smaller fragments. 1954 * 1955 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1956 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1957 * an indirect block: it's an array of block pointers. It consumes 1958 * only one sector and hence is allocatable regardless of fragmentation. 1959 * The gang header's bps point to its gang members, which hold the data. 1960 * 1961 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1962 * as the verifier to ensure uniqueness of the SHA256 checksum. 1963 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1964 * not the gang header. This ensures that data block signatures (needed for 1965 * deduplication) are independent of how the block is physically stored. 1966 * 1967 * Gang blocks can be nested: a gang member may itself be a gang block. 1968 * Thus every gang block is a tree in which root and all interior nodes are 1969 * gang headers, and the leaves are normal blocks that contain user data. 1970 * The root of the gang tree is called the gang leader. 1971 * 1972 * To perform any operation (read, rewrite, free, claim) on a gang block, 1973 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1974 * in the io_gang_tree field of the original logical i/o by recursively 1975 * reading the gang leader and all gang headers below it. This yields 1976 * an in-core tree containing the contents of every gang header and the 1977 * bps for every constituent of the gang block. 1978 * 1979 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1980 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1981 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1982 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1983 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1984 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1985 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1986 * of the gang header plus zio_checksum_compute() of the data to update the 1987 * gang header's blk_cksum as described above. 1988 * 1989 * The two-phase assemble/issue model solves the problem of partial failure -- 1990 * what if you'd freed part of a gang block but then couldn't read the 1991 * gang header for another part? 
Assembling the entire gang tree first 1992 * ensures that all the necessary gang header I/O has succeeded before 1993 * starting the actual work of free, claim, or write. Once the gang tree 1994 * is assembled, free and claim are in-memory operations that cannot fail. 1995 * 1996 * In the event that a gang write fails, zio_dva_unallocate() walks the 1997 * gang tree to immediately free (i.e. insert back into the space map) 1998 * everything we've allocated. This ensures that we don't get ENOSPC 1999 * errors during repeated suspend/resume cycles due to a flaky device. 2000 * 2001 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2002 * the gang tree, we won't modify the block, so we can safely defer the free 2003 * (knowing that the block is still intact). If we *can* assemble the gang 2004 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2005 * each constituent bp and we can allocate a new block on the next sync pass. 2006 * 2007 * In all cases, the gang tree allows complete recovery from partial failure. 2008 * ========================================================================== 2009 */ 2010 2011static void 2012zio_gang_issue_func_done(zio_t *zio) 2013{ 2014 abd_put(zio->io_abd); 2015} 2016 2017static zio_t * 2018zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2019 uint64_t offset) 2020{ 2021 if (gn != NULL) 2022 return (pio); 2023 2024 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2025 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2026 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2027 &pio->io_bookmark)); 2028} 2029 2030static zio_t * 2031zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2032 uint64_t offset) 2033{ 2034 zio_t *zio; 2035 2036 if (gn != NULL) { 2037 abd_t *gbh_abd = 2038 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2039 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2040 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2041 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2042 &pio->io_bookmark); 2043 /* 2044 * As we rewrite each gang header, the pipeline will compute 2045 * a new gang block header checksum for it; but no one will 2046 * compute a new data checksum, so we do that here. The one 2047 * exception is the gang leader: the pipeline already computed 2048 * its data checksum because that stage precedes gang assembly. 2049 * (Presently, nothing actually uses interior data checksums; 2050 * this is just good hygiene.) 2051 */ 2052 if (gn != pio->io_gang_leader->io_gang_tree) { 2053 abd_t *buf = abd_get_offset(data, offset); 2054 2055 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2056 buf, BP_GET_PSIZE(bp)); 2057 2058 abd_put(buf); 2059 } 2060 /* 2061 * If we are here to damage data for testing purposes, 2062 * leave the GBH alone so that we can detect the damage. 2063 */ 2064 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2065 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2066 } else { 2067 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2068 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2069 zio_gang_issue_func_done, NULL, pio->io_priority, 2070 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2071 } 2072 2073 return (zio); 2074} 2075 2076/* ARGSUSED */ 2077static zio_t * 2078zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2079 uint64_t offset) 2080{ 2081 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2082 BP_IS_GANG(bp) ? 
SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp), 2083 ZIO_GANG_CHILD_FLAGS(pio))); 2084} 2085 2086/* ARGSUSED */ 2087static zio_t * 2088zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2089 uint64_t offset) 2090{ 2091 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2092 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2093} 2094 2095static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2096 NULL, 2097 zio_read_gang, 2098 zio_rewrite_gang, 2099 zio_free_gang, 2100 zio_claim_gang, 2101 NULL 2102}; 2103 2104static void zio_gang_tree_assemble_done(zio_t *zio); 2105 2106static zio_gang_node_t * 2107zio_gang_node_alloc(zio_gang_node_t **gnpp) 2108{ 2109 zio_gang_node_t *gn; 2110 2111 ASSERT(*gnpp == NULL); 2112 2113 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2114 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2115 *gnpp = gn; 2116 2117 return (gn); 2118} 2119 2120static void 2121zio_gang_node_free(zio_gang_node_t **gnpp) 2122{ 2123 zio_gang_node_t *gn = *gnpp; 2124 2125 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2126 ASSERT(gn->gn_child[g] == NULL); 2127 2128 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2129 kmem_free(gn, sizeof (*gn)); 2130 *gnpp = NULL; 2131} 2132 2133static void 2134zio_gang_tree_free(zio_gang_node_t **gnpp) 2135{ 2136 zio_gang_node_t *gn = *gnpp; 2137 2138 if (gn == NULL) 2139 return; 2140 2141 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2142 zio_gang_tree_free(&gn->gn_child[g]); 2143 2144 zio_gang_node_free(gnpp); 2145} 2146 2147static void 2148zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2149{ 2150 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2151 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2152 2153 ASSERT(gio->io_gang_leader == gio); 2154 ASSERT(BP_IS_GANG(bp)); 2155 2156 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2157 zio_gang_tree_assemble_done, gn, gio->io_priority, 2158 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2159} 2160 2161static void 2162zio_gang_tree_assemble_done(zio_t *zio) 2163{ 2164 zio_t *gio = zio->io_gang_leader; 2165 zio_gang_node_t *gn = zio->io_private; 2166 blkptr_t *bp = zio->io_bp; 2167 2168 ASSERT(gio == zio_unique_parent(zio)); 2169 ASSERT(zio->io_child_count == 0); 2170 2171 if (zio->io_error) 2172 return; 2173 2174 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2175 if (BP_SHOULD_BYTESWAP(bp)) 2176 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2177 2178 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2179 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2180 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2181 2182 abd_put(zio->io_abd); 2183 2184 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2185 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2186 if (!BP_IS_GANG(gbp)) 2187 continue; 2188 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2189 } 2190} 2191 2192static void 2193zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2194 uint64_t offset) 2195{ 2196 zio_t *gio = pio->io_gang_leader; 2197 zio_t *zio; 2198 2199 ASSERT(BP_IS_GANG(bp) == !!gn); 2200 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2201 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2202 2203 /* 2204 * If you're a gang header, your data is in gn->gn_gbh. 2205 * If you're a gang member, your data is in 'data' and gn == NULL. 
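 * Either way, zio_gang_issue_func[] (indexed by the gang leader's
 * I/O type) creates the appropriate child zio for this bp.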
2206 */ 2207 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2208 2209 if (gn != NULL) { 2210 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2211 2212 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2213 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2214 if (BP_IS_HOLE(gbp)) 2215 continue; 2216 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2217 offset); 2218 offset += BP_GET_PSIZE(gbp); 2219 } 2220 } 2221 2222 if (gn == gio->io_gang_tree && gio->io_abd != NULL) 2223 ASSERT3U(gio->io_size, ==, offset); 2224 2225 if (zio != pio) 2226 zio_nowait(zio); 2227} 2228 2229static int 2230zio_gang_assemble(zio_t *zio) 2231{ 2232 blkptr_t *bp = zio->io_bp; 2233 2234 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2235 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2236 2237 zio->io_gang_leader = zio; 2238 2239 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2240 2241 return (ZIO_PIPELINE_CONTINUE); 2242} 2243 2244static int 2245zio_gang_issue(zio_t *zio) 2246{ 2247 blkptr_t *bp = zio->io_bp; 2248 2249 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2250 return (ZIO_PIPELINE_STOP); 2251 } 2252 2253 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2254 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2255 2256 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2257 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2258 0); 2259 else 2260 zio_gang_tree_free(&zio->io_gang_tree); 2261 2262 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2263 2264 return (ZIO_PIPELINE_CONTINUE); 2265} 2266 2267static void 2268zio_write_gang_member_ready(zio_t *zio) 2269{ 2270 zio_t *pio = zio_unique_parent(zio); 2271 zio_t *gio = zio->io_gang_leader; 2272 dva_t *cdva = zio->io_bp->blk_dva; 2273 dva_t *pdva = pio->io_bp->blk_dva; 2274 uint64_t asize; 2275 2276 if (BP_IS_HOLE(zio->io_bp)) 2277 return; 2278 2279 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2280 2281 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2282 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2283 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2284 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2285 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2286 2287 mutex_enter(&pio->io_lock); 2288 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2289 ASSERT(DVA_GET_GANG(&pdva[d])); 2290 asize = DVA_GET_ASIZE(&pdva[d]); 2291 asize += DVA_GET_ASIZE(&cdva[d]); 2292 DVA_SET_ASIZE(&pdva[d], asize); 2293 } 2294 mutex_exit(&pio->io_lock); 2295} 2296 2297static void 2298zio_write_gang_done(zio_t *zio) 2299{ 2300 abd_put(zio->io_abd); 2301} 2302 2303static int 2304zio_write_gang_block(zio_t *pio) 2305{ 2306 spa_t *spa = pio->io_spa; 2307 metaslab_class_t *mc = spa_normal_class(spa); 2308 blkptr_t *bp = pio->io_bp; 2309 zio_t *gio = pio->io_gang_leader; 2310 zio_t *zio; 2311 zio_gang_node_t *gn, **gnpp; 2312 zio_gbh_phys_t *gbh; 2313 abd_t *gbh_abd; 2314 uint64_t txg = pio->io_txg; 2315 uint64_t resid = pio->io_size; 2316 uint64_t lsize; 2317 int copies = gio->io_prop.zp_copies; 2318 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2319 zio_prop_t zp; 2320 int error; 2321 2322 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2323 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2324 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2325 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2326 2327 flags |= METASLAB_ASYNC_ALLOC; 2328 VERIFY(refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2329 pio)); 2330 2331 /* 2332 * The logical zio 
has already placed a reservation for 2333 * 'copies' allocation slots but gang blocks may require 2334 * additional copies. These additional copies 2335 * (i.e. gbh_copies - copies) are guaranteed to succeed 2336 * since metaslab_class_throttle_reserve() always allows 2337 * additional reservations for gang blocks. 2338 */ 2339 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2340 pio->io_allocator, pio, flags)); 2341 } 2342 2343 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2344 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2345 &pio->io_alloc_list, pio, pio->io_allocator); 2346 if (error) { 2347 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2348 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2349 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2350 2351 /* 2352 * If we failed to allocate the gang block header then 2353 * we remove any additional allocation reservations that 2354 * we placed here. The original reservation will 2355 * be removed when the logical I/O goes to the ready 2356 * stage. 2357 */ 2358 metaslab_class_throttle_unreserve(mc, 2359 gbh_copies - copies, pio->io_allocator, pio); 2360 } 2361 pio->io_error = error; 2362 return (ZIO_PIPELINE_CONTINUE); 2363 } 2364 2365 if (pio == gio) { 2366 gnpp = &gio->io_gang_tree; 2367 } else { 2368 gnpp = pio->io_private; 2369 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2370 } 2371 2372 gn = zio_gang_node_alloc(gnpp); 2373 gbh = gn->gn_gbh; 2374 bzero(gbh, SPA_GANGBLOCKSIZE); 2375 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2376 2377 /* 2378 * Create the gang header. 2379 */ 2380 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2381 zio_write_gang_done, NULL, pio->io_priority, 2382 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2383 2384 /* 2385 * Create and nowait the gang children. 2386 */ 2387 for (int g = 0; resid != 0; resid -= lsize, g++) { 2388 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2389 SPA_MINBLOCKSIZE); 2390 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2391 2392 zp.zp_checksum = gio->io_prop.zp_checksum; 2393 zp.zp_compress = ZIO_COMPRESS_OFF; 2394 zp.zp_type = DMU_OT_NONE; 2395 zp.zp_level = 0; 2396 zp.zp_copies = gio->io_prop.zp_copies; 2397 zp.zp_dedup = B_FALSE; 2398 zp.zp_dedup_verify = B_FALSE; 2399 zp.zp_nopwrite = B_FALSE; 2400 2401 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2402 abd_get_offset(pio->io_abd, pio->io_size - resid), lsize, 2403 lsize, &zp, zio_write_gang_member_ready, NULL, NULL, 2404 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2405 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2406 2407 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2408 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2409 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2410 2411 /* 2412 * Gang children won't throttle but we should 2413 * account for their work, so reserve an allocation 2414 * slot for them here. 2415 */ 2416 VERIFY(metaslab_class_throttle_reserve(mc, 2417 zp.zp_copies, cio->io_allocator, cio, flags)); 2418 } 2419 zio_nowait(cio); 2420 } 2421 2422 /* 2423 * Set pio's pipeline to just wait for zio to finish. 2424 */ 2425 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2426 2427 zio_nowait(zio); 2428 2429 return (ZIO_PIPELINE_CONTINUE); 2430} 2431 2432/* 2433 * The zio_nop_write stage in the pipeline determines if allocating a 2434 * new bp is necessary. The nopwrite feature can handle writes in 2435 * either syncing or open context (i.e. zil writes) and as a result is 2436 * mutually exclusive with dedup. 
2437 * 2438 * By leveraging a cryptographically secure checksum, such as SHA256, we 2439 * can compare the checksums of the new data and the old to determine if 2440 * allocating a new block is required. Note that our requirements for 2441 * cryptographic strength are fairly weak: there can't be any accidental 2442 * hash collisions, but we don't need to be secure against intentional 2443 * (malicious) collisions. To trigger a nopwrite, you have to be able 2444 * to write the file to begin with, and triggering an incorrect (hash 2445 * collision) nopwrite is no worse than simply writing to the file. 2446 * That said, there are no known attacks against the checksum algorithms 2447 * used for nopwrite, assuming that the salt and the checksums 2448 * themselves remain secret. 2449 */ 2450static int 2451zio_nop_write(zio_t *zio) 2452{ 2453 blkptr_t *bp = zio->io_bp; 2454 blkptr_t *bp_orig = &zio->io_bp_orig; 2455 zio_prop_t *zp = &zio->io_prop; 2456 2457 ASSERT(BP_GET_LEVEL(bp) == 0); 2458 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2459 ASSERT(zp->zp_nopwrite); 2460 ASSERT(!zp->zp_dedup); 2461 ASSERT(zio->io_bp_override == NULL); 2462 ASSERT(IO_IS_ALLOCATING(zio)); 2463 2464 /* 2465 * Check to see if the original bp and the new bp have matching 2466 * characteristics (i.e. same checksum, compression algorithms, etc). 2467 * If they don't then just continue with the pipeline which will 2468 * allocate a new bp. 2469 */ 2470 if (BP_IS_HOLE(bp_orig) || 2471 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2472 ZCHECKSUM_FLAG_NOPWRITE) || 2473 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2474 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2475 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2476 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2477 return (ZIO_PIPELINE_CONTINUE); 2478 2479 /* 2480 * If the checksums match then reset the pipeline so that we 2481 * avoid allocating a new bp and issuing any I/O. 
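 * In that case we copy bp_orig back into bp, set ZIO_FLAG_NOPWRITE,
 * and collapse the pipeline to ZIO_INTERLOCK_PIPELINE so that no DVA
 * allocation or vdev I/O is performed.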
2482 */ 2483 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2484 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2485 ZCHECKSUM_FLAG_NOPWRITE); 2486 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2487 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2488 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2489 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2490 sizeof (uint64_t)) == 0); 2491 2492 *bp = *bp_orig; 2493 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2494 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2495 } 2496 2497 return (ZIO_PIPELINE_CONTINUE); 2498} 2499 2500/* 2501 * ========================================================================== 2502 * Dedup 2503 * ========================================================================== 2504 */ 2505static void 2506zio_ddt_child_read_done(zio_t *zio) 2507{ 2508 blkptr_t *bp = zio->io_bp; 2509 ddt_entry_t *dde = zio->io_private; 2510 ddt_phys_t *ddp; 2511 zio_t *pio = zio_unique_parent(zio); 2512 2513 mutex_enter(&pio->io_lock); 2514 ddp = ddt_phys_select(dde, bp); 2515 if (zio->io_error == 0) 2516 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2517 2518 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2519 dde->dde_repair_abd = zio->io_abd; 2520 else 2521 abd_free(zio->io_abd); 2522 mutex_exit(&pio->io_lock); 2523} 2524 2525static int 2526zio_ddt_read_start(zio_t *zio) 2527{ 2528 blkptr_t *bp = zio->io_bp; 2529 2530 ASSERT(BP_GET_DEDUP(bp)); 2531 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2532 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2533 2534 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2535 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2536 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2537 ddt_phys_t *ddp = dde->dde_phys; 2538 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2539 blkptr_t blk; 2540 2541 ASSERT(zio->io_vsd == NULL); 2542 zio->io_vsd = dde; 2543 2544 if (ddp_self == NULL) 2545 return (ZIO_PIPELINE_CONTINUE); 2546 2547 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2548 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2549 continue; 2550 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2551 &blk); 2552 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2553 abd_alloc_for_io(zio->io_size, B_TRUE), 2554 zio->io_size, zio_ddt_child_read_done, dde, 2555 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2556 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2557 } 2558 return (ZIO_PIPELINE_CONTINUE); 2559 } 2560 2561 zio_nowait(zio_read(zio, zio->io_spa, bp, 2562 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2563 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2564 2565 return (ZIO_PIPELINE_CONTINUE); 2566} 2567 2568static int 2569zio_ddt_read_done(zio_t *zio) 2570{ 2571 blkptr_t *bp = zio->io_bp; 2572 2573 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2574 return (ZIO_PIPELINE_STOP); 2575 } 2576 2577 ASSERT(BP_GET_DEDUP(bp)); 2578 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2579 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2580 2581 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2582 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2583 ddt_entry_t *dde = zio->io_vsd; 2584 if (ddt == NULL) { 2585 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2586 return (ZIO_PIPELINE_CONTINUE); 2587 } 2588 if (dde == NULL) { 2589 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2590 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2591 return (ZIO_PIPELINE_STOP); 2592 } 2593 if (dde->dde_repair_abd != NULL) { 2594 abd_copy(zio->io_abd, dde->dde_repair_abd, 2595 zio->io_size); 2596 
zio->io_child_error[ZIO_CHILD_DDT] = 0; 2597 } 2598 ddt_repair_done(ddt, dde); 2599 zio->io_vsd = NULL; 2600 } 2601 2602 ASSERT(zio->io_vsd == NULL); 2603 2604 return (ZIO_PIPELINE_CONTINUE); 2605} 2606 2607static boolean_t 2608zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2609{ 2610 spa_t *spa = zio->io_spa; 2611 boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW); 2612 2613 /* We should never get a raw, override zio */ 2614 ASSERT(!(zio->io_bp_override && do_raw)); 2615 2616 /* 2617 * Note: we compare the original data, not the transformed data, 2618 * because when zio->io_bp is an override bp, we will not have 2619 * pushed the I/O transforms. That's an important optimization 2620 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2621 */ 2622 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2623 zio_t *lio = dde->dde_lead_zio[p]; 2624 2625 if (lio != NULL) { 2626 return (lio->io_orig_size != zio->io_orig_size || 2627 abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2628 zio->io_orig_size) != 0); 2629 } 2630 } 2631 2632 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2633 ddt_phys_t *ddp = &dde->dde_phys[p]; 2634 2635 if (ddp->ddp_phys_birth != 0) { 2636 arc_buf_t *abuf = NULL; 2637 arc_flags_t aflags = ARC_FLAG_WAIT; 2638 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2639 blkptr_t blk = *zio->io_bp; 2640 int error; 2641 2642 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2643 2644 ddt_exit(ddt); 2645 2646 /* 2647 * Intuitively, it would make more sense to compare 2648 * io_abd than io_orig_abd in the raw case since you 2649 * don't want to look at any transformations that have 2650 * happened to the data. However, for raw I/Os the 2651 * data will actually be the same in io_abd and 2652 * io_orig_abd, so all we have to do is issue this as 2653 * a raw ARC read. 
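 * If the size or content differs despite the equal checksums, we
 * have found a genuine collision and report it (EEXIST); the caller
 * then upgrades the checksum or falls back to an ordinary write.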
2654 */ 2655 if (do_raw) { 2656 zio_flags |= ZIO_FLAG_RAW; 2657 ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2658 ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 2659 zio->io_size)); 2660 ASSERT3P(zio->io_transform_stack, ==, NULL); 2661 } 2662 2663 error = arc_read(NULL, spa, &blk, 2664 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2665 zio_flags, &aflags, &zio->io_bookmark); 2666 2667 if (error == 0) { 2668 if (arc_buf_size(abuf) != zio->io_orig_size || 2669 abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2670 zio->io_orig_size) != 0) 2671 error = SET_ERROR(EEXIST); 2672 arc_buf_destroy(abuf, &abuf); 2673 } 2674 2675 ddt_enter(ddt); 2676 return (error != 0); 2677 } 2678 } 2679 2680 return (B_FALSE); 2681} 2682 2683static void 2684zio_ddt_child_write_ready(zio_t *zio) 2685{ 2686 int p = zio->io_prop.zp_copies; 2687 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2688 ddt_entry_t *dde = zio->io_private; 2689 ddt_phys_t *ddp = &dde->dde_phys[p]; 2690 zio_t *pio; 2691 2692 if (zio->io_error) 2693 return; 2694 2695 ddt_enter(ddt); 2696 2697 ASSERT(dde->dde_lead_zio[p] == zio); 2698 2699 ddt_phys_fill(ddp, zio->io_bp); 2700 2701 zio_link_t *zl = NULL; 2702 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2703 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2704 2705 ddt_exit(ddt); 2706} 2707 2708static void 2709zio_ddt_child_write_done(zio_t *zio) 2710{ 2711 int p = zio->io_prop.zp_copies; 2712 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2713 ddt_entry_t *dde = zio->io_private; 2714 ddt_phys_t *ddp = &dde->dde_phys[p]; 2715 2716 ddt_enter(ddt); 2717 2718 ASSERT(ddp->ddp_refcnt == 0); 2719 ASSERT(dde->dde_lead_zio[p] == zio); 2720 dde->dde_lead_zio[p] = NULL; 2721 2722 if (zio->io_error == 0) { 2723 zio_link_t *zl = NULL; 2724 while (zio_walk_parents(zio, &zl) != NULL) 2725 ddt_phys_addref(ddp); 2726 } else { 2727 ddt_phys_clear(ddp); 2728 } 2729 2730 ddt_exit(ddt); 2731} 2732 2733static void 2734zio_ddt_ditto_write_done(zio_t *zio) 2735{ 2736 int p = DDT_PHYS_DITTO; 2737 zio_prop_t *zp = &zio->io_prop; 2738 blkptr_t *bp = zio->io_bp; 2739 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2740 ddt_entry_t *dde = zio->io_private; 2741 ddt_phys_t *ddp = &dde->dde_phys[p]; 2742 ddt_key_t *ddk = &dde->dde_key; 2743 2744 ddt_enter(ddt); 2745 2746 ASSERT(ddp->ddp_refcnt == 0); 2747 ASSERT(dde->dde_lead_zio[p] == zio); 2748 dde->dde_lead_zio[p] = NULL; 2749 2750 if (zio->io_error == 0) { 2751 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2752 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2753 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2754 if (ddp->ddp_phys_birth != 0) 2755 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2756 ddt_phys_fill(ddp, bp); 2757 } 2758 2759 ddt_exit(ddt); 2760} 2761 2762static int 2763zio_ddt_write(zio_t *zio) 2764{ 2765 spa_t *spa = zio->io_spa; 2766 blkptr_t *bp = zio->io_bp; 2767 uint64_t txg = zio->io_txg; 2768 zio_prop_t *zp = &zio->io_prop; 2769 int p = zp->zp_copies; 2770 int ditto_copies; 2771 zio_t *cio = NULL; 2772 zio_t *dio = NULL; 2773 ddt_t *ddt = ddt_select(spa, bp); 2774 ddt_entry_t *dde; 2775 ddt_phys_t *ddp; 2776 2777 ASSERT(BP_GET_DEDUP(bp)); 2778 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2779 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2780 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2781 2782 ddt_enter(ddt); 2783 dde = ddt_lookup(ddt, bp, B_TRUE); 2784 ddp = &dde->dde_phys[p]; 2785 2786 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2787 /* 2788 * If we're using a weak checksum, upgrade to a 
strong checksum 2789 * and try again. If we're already using a strong checksum, 2790 * we can't resolve it, so just convert to an ordinary write. 2791 * (And automatically e-mail a paper to Nature?) 2792 */ 2793 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2794 ZCHECKSUM_FLAG_DEDUP)) { 2795 zp->zp_checksum = spa_dedup_checksum(spa); 2796 zio_pop_transforms(zio); 2797 zio->io_stage = ZIO_STAGE_OPEN; 2798 BP_ZERO(bp); 2799 } else { 2800 zp->zp_dedup = B_FALSE; 2801 BP_SET_DEDUP(bp, B_FALSE); 2802 } 2803 ASSERT(!BP_GET_DEDUP(bp)); 2804 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2805 ddt_exit(ddt); 2806 return (ZIO_PIPELINE_CONTINUE); 2807 } 2808 2809 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2810 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2811 2812 if (ditto_copies > ddt_ditto_copies_present(dde) && 2813 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2814 zio_prop_t czp = *zp; 2815 2816 czp.zp_copies = ditto_copies; 2817 2818 /* 2819 * If we arrived here with an override bp, we won't have run 2820 * the transform stack, so we won't have the data we need to 2821 * generate a child i/o. So, toss the override bp and restart. 2822 * This is safe, because using the override bp is just an 2823 * optimization; and it's rare, so the cost doesn't matter. 2824 */ 2825 if (zio->io_bp_override) { 2826 zio_pop_transforms(zio); 2827 zio->io_stage = ZIO_STAGE_OPEN; 2828 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2829 zio->io_bp_override = NULL; 2830 BP_ZERO(bp); 2831 ddt_exit(ddt); 2832 return (ZIO_PIPELINE_CONTINUE); 2833 } 2834 2835 dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2836 zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 2837 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2838 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2839 2840 zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2841 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2842 } 2843 2844 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2845 if (ddp->ddp_phys_birth != 0) 2846 ddt_bp_fill(ddp, bp, txg); 2847 if (dde->dde_lead_zio[p] != NULL) 2848 zio_add_child(zio, dde->dde_lead_zio[p]); 2849 else 2850 ddt_phys_addref(ddp); 2851 } else if (zio->io_bp_override) { 2852 ASSERT(bp->blk_birth == txg); 2853 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2854 ddt_phys_fill(ddp, bp); 2855 ddt_phys_addref(ddp); 2856 } else { 2857 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2858 zio->io_orig_size, zio->io_orig_size, zp, 2859 zio_ddt_child_write_ready, NULL, NULL, 2860 zio_ddt_child_write_done, dde, zio->io_priority, 2861 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2862 2863 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2864 dde->dde_lead_zio[p] = cio; 2865 } 2866 2867 ddt_exit(ddt); 2868 2869 if (cio) 2870 zio_nowait(cio); 2871 if (dio) 2872 zio_nowait(dio); 2873 2874 return (ZIO_PIPELINE_CONTINUE); 2875} 2876 2877ddt_entry_t *freedde; /* for debugging */ 2878 2879static int 2880zio_ddt_free(zio_t *zio) 2881{ 2882 spa_t *spa = zio->io_spa; 2883 blkptr_t *bp = zio->io_bp; 2884 ddt_t *ddt = ddt_select(spa, bp); 2885 ddt_entry_t *dde; 2886 ddt_phys_t *ddp; 2887 2888 ASSERT(BP_GET_DEDUP(bp)); 2889 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2890 2891 ddt_enter(ddt); 2892 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2893 ddp = ddt_phys_select(dde, bp); 2894 ddt_phys_decref(ddp); 2895 ddt_exit(ddt); 2896 2897 return (ZIO_PIPELINE_CONTINUE); 2898} 2899 2900/* 2901 * ========================================================================== 2902 * Allocate and free blocks 2903 * 
========================================================================== 2904 */ 2905 2906static zio_t * 2907zio_io_to_allocate(spa_t *spa, int allocator) 2908{ 2909 zio_t *zio; 2910 2911 ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 2912 2913 zio = avl_first(&spa->spa_alloc_trees[allocator]); 2914 if (zio == NULL) 2915 return (NULL); 2916 2917 ASSERT(IO_IS_ALLOCATING(zio)); 2918 2919 /* 2920 * Try to place a reservation for this zio. If we're unable to 2921 * reserve then we throttle. 2922 */ 2923 ASSERT3U(zio->io_allocator, ==, allocator); 2924 if (!metaslab_class_throttle_reserve(spa_normal_class(spa), 2925 zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 2926 return (NULL); 2927 } 2928 2929 avl_remove(&spa->spa_alloc_trees[allocator], zio); 2930 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 2931 2932 return (zio); 2933} 2934 2935static int 2936zio_dva_throttle(zio_t *zio) 2937{ 2938 spa_t *spa = zio->io_spa; 2939 zio_t *nio; 2940 2941 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 2942 !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled || 2943 zio->io_child_type == ZIO_CHILD_GANG || 2944 zio->io_flags & ZIO_FLAG_NODATA) { 2945 return (ZIO_PIPELINE_CONTINUE); 2946 } 2947 2948 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2949 2950 ASSERT3U(zio->io_queued_timestamp, >, 0); 2951 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2952 2953 zbookmark_phys_t *bm = &zio->io_bookmark; 2954 /* 2955 * We want to try to use as many allocators as possible to help improve 2956 * performance, but we also want logically adjacent IOs to be physically 2957 * adjacent to improve sequential read performance. We chunk each object 2958 * into 2^20 block regions, and then hash based on the objset, object, 2959 * level, and region to accomplish both of these goals. 2960 */ 2961 zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 2962 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 2963 mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 2964 2965 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2966 avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 2967 2968 nio = zio_io_to_allocate(zio->io_spa, zio->io_allocator); 2969 mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 2970 2971 if (nio == zio) 2972 return (ZIO_PIPELINE_CONTINUE); 2973 2974 if (nio != NULL) { 2975 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2976 /* 2977 * We are passing control to a new zio so make sure that 2978 * it is processed by a different thread. We do this to 2979 * avoid stack overflows that can occur when parents are 2980 * throttled and children are making progress. We allow 2981 * it to go to the head of the taskq since it's already 2982 * been waiting. 
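 * (Hence the B_TRUE cut-in-line argument to the dispatch below.)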
2983 */ 2984 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 2985 } 2986 return (ZIO_PIPELINE_STOP); 2987} 2988 2989void 2990zio_allocate_dispatch(spa_t *spa, int allocator) 2991{ 2992 zio_t *zio; 2993 2994 mutex_enter(&spa->spa_alloc_locks[allocator]); 2995 zio = zio_io_to_allocate(spa, allocator); 2996 mutex_exit(&spa->spa_alloc_locks[allocator]); 2997 if (zio == NULL) 2998 return; 2999 3000 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 3001 ASSERT0(zio->io_error); 3002 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 3003} 3004 3005static int 3006zio_dva_allocate(zio_t *zio) 3007{ 3008 spa_t *spa = zio->io_spa; 3009 metaslab_class_t *mc = spa_normal_class(spa); 3010 blkptr_t *bp = zio->io_bp; 3011 int error; 3012 int flags = 0; 3013 3014 if (zio->io_gang_leader == NULL) { 3015 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3016 zio->io_gang_leader = zio; 3017 } 3018 3019 ASSERT(BP_IS_HOLE(bp)); 3020 ASSERT0(BP_GET_NDVAS(bp)); 3021 ASSERT3U(zio->io_prop.zp_copies, >, 0); 3022 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3023 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3024 3025 if (zio->io_flags & ZIO_FLAG_NODATA) { 3026 flags |= METASLAB_DONT_THROTTLE; 3027 } 3028 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) { 3029 flags |= METASLAB_GANG_CHILD; 3030 } 3031 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) { 3032 flags |= METASLAB_ASYNC_ALLOC; 3033 } 3034 3035 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3036 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3037 &zio->io_alloc_list, zio, zio->io_allocator); 3038 3039 if (error != 0) { 3040 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 3041 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 3042 error); 3043 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 3044 return (zio_write_gang_block(zio)); 3045 zio->io_error = error; 3046 } 3047 3048 return (ZIO_PIPELINE_CONTINUE); 3049} 3050 3051static int 3052zio_dva_free(zio_t *zio) 3053{ 3054 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3055 3056 return (ZIO_PIPELINE_CONTINUE); 3057} 3058 3059static int 3060zio_dva_claim(zio_t *zio) 3061{ 3062 int error; 3063 3064 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3065 if (error) 3066 zio->io_error = error; 3067 3068 return (ZIO_PIPELINE_CONTINUE); 3069} 3070 3071/* 3072 * Undo an allocation. This is used by zio_done() when an I/O fails 3073 * and we want to give back the block we just allocated. 3074 * This handles both normal blocks and gang blocks. 3075 */ 3076static void 3077zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3078{ 3079 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3080 ASSERT(zio->io_bp_override == NULL); 3081 3082 if (!BP_IS_HOLE(bp)) 3083 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3084 3085 if (gn != NULL) { 3086 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3087 zio_dva_unallocate(zio, gn->gn_child[g], 3088 &gn->gn_gbh->zg_blkptr[g]); 3089 } 3090 } 3091} 3092 3093/* 3094 * Try to allocate an intent log block. Return 0 on success, errno on failure. 
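 * We try the dedicated log (slog) class first and fall back to the
 * normal class on failure; *slog tells the caller which class the
 * block was allocated from.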
3095 */ 3096int 3097zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp, 3098 blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3099{ 3100 int error = 1; 3101 zio_alloc_list_t io_alloc_list; 3102 3103 ASSERT(txg > spa_syncing_txg(spa)); 3104 3105 metaslab_trace_init(&io_alloc_list); 3106 /* 3107 * When allocating a zil block, we don't have information about 3108 * the final destination of the block except the objset it's part 3109 * of, so we just hash the objset ID to pick the allocator to get 3110 * some parallelism. 3111 */ 3112 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3113 txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3114 cityhash4(0, 0, 0, objset) % spa->spa_alloc_count); 3115 if (error == 0) { 3116 *slog = TRUE; 3117 } else { 3118 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3119 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3120 &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) % 3121 spa->spa_alloc_count); 3122 if (error == 0) 3123 *slog = FALSE; 3124 } 3125 metaslab_trace_fini(&io_alloc_list); 3126 3127 if (error == 0) { 3128 BP_SET_LSIZE(new_bp, size); 3129 BP_SET_PSIZE(new_bp, size); 3130 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3131 BP_SET_CHECKSUM(new_bp, 3132 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3133 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3134 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3135 BP_SET_LEVEL(new_bp, 0); 3136 BP_SET_DEDUP(new_bp, 0); 3137 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3138 } else { 3139 zfs_dbgmsg("%s: zil block allocation failure: " 3140 "size %llu, error %d", spa_name(spa), size, error); 3141 } 3142 3143 return (error); 3144} 3145 3146/* 3147 * ========================================================================== 3148 * Read, write and delete to physical devices 3149 * ========================================================================== 3150 */ 3151 3152 3153/* 3154 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3155 * stops after this stage and will resume upon I/O completion. 3156 * However, there are instances where the vdev layer may need to 3157 * continue the pipeline when an I/O was not issued. Since the I/O 3158 * that was sent to the vdev layer might be different than the one 3159 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3160 * force the underlying vdev layers to call either zio_execute() or 3161 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3162 */ 3163static int 3164zio_vdev_io_start(zio_t *zio) 3165{ 3166 vdev_t *vd = zio->io_vd; 3167 uint64_t align; 3168 spa_t *spa = zio->io_spa; 3169 int ret; 3170 3171 ASSERT(zio->io_error == 0); 3172 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3173 3174 if (vd == NULL) { 3175 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3176 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3177 3178 /* 3179 * The mirror_ops handle multiple DVAs in a single BP. 
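 * That is, a zio with no vdev is a logical I/O whose bp may carry
 * multiple DVAs, so we start it through vdev_mirror_ops, which
 * treats each DVA as a mirror child.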
3180 */ 3181 vdev_mirror_ops.vdev_op_io_start(zio); 3182 return (ZIO_PIPELINE_STOP); 3183 } 3184 3185 if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE && 3186 zio->io_priority == ZIO_PRIORITY_NOW) { 3187 trim_map_free(vd, zio->io_offset, zio->io_size, zio->io_txg); 3188 return (ZIO_PIPELINE_CONTINUE); 3189 } 3190 3191 ASSERT3P(zio->io_logical, !=, zio); 3192 if (zio->io_type == ZIO_TYPE_WRITE) { 3193 ASSERT(spa->spa_trust_config); 3194 3195 if (zio->io_vd->vdev_removing) { 3196 /* 3197 * Note: the code can handle other kinds of writes, 3198 * but we don't expect them. 3199 */ 3200 ASSERT(zio->io_flags & 3201 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3202 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3203 } 3204 } 3205 3206 /* 3207 * We keep track of time-sensitive I/Os so that the scan thread 3208 * can quickly react to certain workloads. In particular, we care 3209 * about non-scrubbing, top-level reads and writes with the following 3210 * characteristics: 3211 * - synchronous writes of user data to non-slog devices 3212 * - any reads of user data 3213 * When these conditions are met, adjust the timestamp of spa_last_io 3214 * which allows the scan thread to adjust its workload accordingly. 3215 */ 3216 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 3217 vd == vd->vdev_top && !vd->vdev_islog && 3218 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 3219 zio->io_txg != spa_syncing_txg(spa)) { 3220 uint64_t old = spa->spa_last_io; 3221 uint64_t new = ddi_get_lbolt64(); 3222 if (old != new) 3223 (void) atomic_cas_64(&spa->spa_last_io, old, new); 3224 } 3225 align = 1ULL << vd->vdev_top->vdev_ashift; 3226 3227 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3228 P2PHASE(zio->io_size, align) != 0) { 3229 /* Transform logical writes to be a full physical block size. */ 3230 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3231 abd_t *abuf = NULL; 3232 if (zio->io_type == ZIO_TYPE_READ || 3233 zio->io_type == ZIO_TYPE_WRITE) 3234 abuf = abd_alloc_sametype(zio->io_abd, asize); 3235 ASSERT(vd == vd->vdev_top); 3236 if (zio->io_type == ZIO_TYPE_WRITE) { 3237 abd_copy(abuf, zio->io_abd, zio->io_size); 3238 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3239 } 3240 zio_push_transform(zio, abuf, asize, abuf ? asize : 0, 3241 zio_subblock); 3242 } 3243 3244 /* 3245 * If this is not a physical io, make sure that it is properly aligned 3246 * before proceeding. 3247 */ 3248 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3249 ASSERT0(P2PHASE(zio->io_offset, align)); 3250 ASSERT0(P2PHASE(zio->io_size, align)); 3251 } else { 3252 /* 3253 * For the physical io we allow alignment 3254 * to a logical block size. 3255 */ 3256 uint64_t log_align = 3257 1ULL << vd->vdev_top->vdev_logical_ashift; 3258 ASSERT0(P2PHASE(zio->io_offset, log_align)); 3259 ASSERT0(P2PHASE(zio->io_size, log_align)); 3260 } 3261 3262 VERIFY(zio->io_type == ZIO_TYPE_READ || spa_writeable(spa)); 3263 3264 /* 3265 * If this is a repair I/O, and there's no self-healing involved -- 3266 * that is, we're just resilvering what we expect to resilver -- 3267 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3268 * This prevents spurious resilvering. 3269 * 3270 * There are a few ways that we can end up creating these spurious 3271 * resilver i/os: 3272 * 3273 * 1. A resilver i/o will be issued if any DVA in the BP has a 3274 * dirty DTL. The mirror code will issue resilver writes to 3275 * each DVA, including the one(s) that are not on vdevs with dirty 3276 * DTLs. 3277 * 3278 * 2. 
With nested replication, which happens when we have a 3279 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 3280 * For example, given mirror(replacing(A+B), C), it's likely that 3281 * only A is out of date (it's the new device). In this case, we'll 3282 * read from C, then use the data to resilver A+B -- but we don't 3283 * actually want to resilver B, just A. The top-level mirror has no 3284 * way to know this, so instead we just discard unnecessary repairs 3285 * as we work our way down the vdev tree. 3286 * 3287 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 3288 * The same logic applies to any form of nested replication: ditto 3289 * + mirror, RAID-Z + replacing, etc. 3290 * 3291 * However, indirect vdevs point off to other vdevs which may have 3292 * DTL's, so we never bypass them. The child i/os on concrete vdevs 3293 * will be properly bypassed instead. 3294 */ 3295 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3296 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3297 zio->io_txg != 0 && /* not a delegated i/o */ 3298 vd->vdev_ops != &vdev_indirect_ops && 3299 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3300 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3301 zio_vdev_io_bypass(zio); 3302 return (ZIO_PIPELINE_CONTINUE); 3303 } 3304 3305 if (vd->vdev_ops->vdev_op_leaf) { 3306 switch (zio->io_type) { 3307 case ZIO_TYPE_READ: 3308 if (vdev_cache_read(zio)) 3309 return (ZIO_PIPELINE_CONTINUE); 3310 /* FALLTHROUGH */ 3311 case ZIO_TYPE_WRITE: 3312 case ZIO_TYPE_FREE: 3313 if ((zio = vdev_queue_io(zio)) == NULL) 3314 return (ZIO_PIPELINE_STOP); 3315 3316 if (!vdev_accessible(vd, zio)) { 3317 zio->io_error = SET_ERROR(ENXIO); 3318 zio_interrupt(zio); 3319 return (ZIO_PIPELINE_STOP); 3320 } 3321 break; 3322 } 3323 /* 3324 * Note that we ignore repair writes for TRIM because they can 3325 * conflict with normal writes. This isn't an issue because, by 3326 * definition, we only repair blocks that aren't freed. 3327 */ 3328 if (zio->io_type == ZIO_TYPE_WRITE && 3329 !(zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3330 !trim_map_write_start(zio)) 3331 return (ZIO_PIPELINE_STOP); 3332 } 3333 3334 vd->vdev_ops->vdev_op_io_start(zio); 3335 return (ZIO_PIPELINE_STOP); 3336} 3337 3338static int 3339zio_vdev_io_done(zio_t *zio) 3340{ 3341 vdev_t *vd = zio->io_vd; 3342 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3343 boolean_t unexpected_error = B_FALSE; 3344 3345 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3346 return (ZIO_PIPELINE_STOP); 3347 } 3348 3349 ASSERT(zio->io_type == ZIO_TYPE_READ || 3350 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_FREE); 3351 3352 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3353 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE || 3354 zio->io_type == ZIO_TYPE_FREE)) { 3355 3356 if (zio->io_type == ZIO_TYPE_WRITE && 3357 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) 3358 trim_map_write_done(zio); 3359 3360 vdev_queue_io_done(zio); 3361 3362 if (zio->io_type == ZIO_TYPE_WRITE) 3363 vdev_cache_write(zio); 3364 3365 if (zio_injection_enabled && zio->io_error == 0) 3366 zio->io_error = zio_handle_device_injection(vd, 3367 zio, EIO); 3368 3369 if (zio_injection_enabled && zio->io_error == 0) 3370 zio->io_error = zio_handle_label_injection(zio, EIO); 3371 3372 if (zio->io_error) { 3373 if (zio->io_error == ENOTSUP && 3374 zio->io_type == ZIO_TYPE_FREE) { 3375 /* Not all devices support TRIM. 
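 * We leave io_error as is but do not flag this as an unexpected
 * error, which would trigger a vdev probe below.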
*/ 3376 } else if (!vdev_accessible(vd, zio)) { 3377 zio->io_error = SET_ERROR(ENXIO); 3378 } else { 3379 unexpected_error = B_TRUE; 3380 } 3381 } 3382 } 3383 3384 ops->vdev_op_io_done(zio); 3385 3386 if (unexpected_error) 3387 VERIFY(vdev_probe(vd, zio) == NULL); 3388 3389 return (ZIO_PIPELINE_CONTINUE); 3390} 3391 3392/* 3393 * This function is used to change the priority of an existing zio that is 3394 * currently in-flight. This is used by the arc to upgrade priority in the 3395 * event that a demand read is made for a block that is currently queued 3396 * as a scrub or async read IO. Otherwise, the high priority read request 3397 * would end up having to wait for the lower priority IO. 3398 */ 3399void 3400zio_change_priority(zio_t *pio, zio_priority_t priority) 3401{ 3402 zio_t *cio, *cio_next; 3403 zio_link_t *zl = NULL; 3404 3405 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 3406 3407 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 3408 vdev_queue_change_io_priority(pio, priority); 3409 } else { 3410 pio->io_priority = priority; 3411 } 3412 3413 mutex_enter(&pio->io_lock); 3414 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 3415 cio_next = zio_walk_children(pio, &zl); 3416 zio_change_priority(cio, priority); 3417 } 3418 mutex_exit(&pio->io_lock); 3419} 3420 3421/* 3422 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3423 * disk, and use that to finish the checksum ereport later. 3424 */ 3425static void 3426zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3427 const void *good_buf) 3428{ 3429 /* no processing needed */ 3430 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3431} 3432 3433/*ARGSUSED*/ 3434void 3435zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3436{ 3437 void *buf = zio_buf_alloc(zio->io_size); 3438 3439 abd_copy_to_buf(buf, zio->io_abd, zio->io_size); 3440 3441 zcr->zcr_cbinfo = zio->io_size; 3442 zcr->zcr_cbdata = buf; 3443 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3444 zcr->zcr_free = zio_buf_free; 3445} 3446 3447static int 3448zio_vdev_io_assess(zio_t *zio) 3449{ 3450 vdev_t *vd = zio->io_vd; 3451 3452 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3453 return (ZIO_PIPELINE_STOP); 3454 } 3455 3456 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3457 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3458 3459 if (zio->io_vsd != NULL) { 3460 zio->io_vsd_ops->vsd_free(zio); 3461 zio->io_vsd = NULL; 3462 } 3463 3464 if (zio_injection_enabled && zio->io_error == 0) 3465 zio->io_error = zio_handle_fault_injection(zio, EIO); 3466 3467 if (zio->io_type == ZIO_TYPE_FREE && 3468 zio->io_priority != ZIO_PRIORITY_NOW) { 3469 switch (zio->io_error) { 3470 case 0: 3471 ZIO_TRIM_STAT_INCR(bytes, zio->io_size); 3472 ZIO_TRIM_STAT_BUMP(success); 3473 break; 3474 case EOPNOTSUPP: 3475 ZIO_TRIM_STAT_BUMP(unsupported); 3476 break; 3477 default: 3478 ZIO_TRIM_STAT_BUMP(failed); 3479 break; 3480 } 3481 } 3482 3483 /* 3484 * If the I/O failed, determine whether we should attempt to retry it. 3485 * 3486 * On retry, we cut in line in the issue queue, since we don't want 3487 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 
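 * The rewind of io_stage to ZIO_STAGE_VDEV_IO_START >> 1 below makes
 * VDEV_IO_START the next stage zio_execute() will select.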
3488 */ 3489 if (zio->io_error && vd == NULL && 3490 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3491 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3492 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3493 zio->io_error = 0; 3494 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3495 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3496 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3497 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3498 zio_requeue_io_start_cut_in_line); 3499 return (ZIO_PIPELINE_STOP); 3500 } 3501 3502 /* 3503 * If we got an error on a leaf device, convert it to ENXIO 3504 * if the device is not accessible at all. 3505 */ 3506 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3507 !vdev_accessible(vd, zio)) 3508 zio->io_error = SET_ERROR(ENXIO); 3509 3510 /* 3511 * If we can't write to an interior vdev (mirror or RAID-Z), 3512 * set vdev_cant_write so that we stop trying to allocate from it. 3513 */ 3514 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3515 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3516 vd->vdev_cant_write = B_TRUE; 3517 } 3518 3519 /* 3520 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3521 * attempts will ever succeed. In this case we set a persistent bit so 3522 * that we don't bother with it in the future. 3523 */ 3524 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3525 zio->io_type == ZIO_TYPE_IOCTL && 3526 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3527 vd->vdev_nowritecache = B_TRUE; 3528 3529 if (zio->io_error) 3530 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3531 3532 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3533 zio->io_physdone != NULL) { 3534 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 3535 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 3536 zio->io_physdone(zio->io_logical); 3537 } 3538 3539 return (ZIO_PIPELINE_CONTINUE); 3540} 3541 3542void 3543zio_vdev_io_reissue(zio_t *zio) 3544{ 3545 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3546 ASSERT(zio->io_error == 0); 3547 3548 zio->io_stage >>= 1; 3549} 3550 3551void 3552zio_vdev_io_redone(zio_t *zio) 3553{ 3554 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3555 3556 zio->io_stage >>= 1; 3557} 3558 3559void 3560zio_vdev_io_bypass(zio_t *zio) 3561{ 3562 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3563 ASSERT(zio->io_error == 0); 3564 3565 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3566 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3567} 3568 3569/* 3570 * ========================================================================== 3571 * Generate and verify checksums 3572 * ========================================================================== 3573 */ 3574static int 3575zio_checksum_generate(zio_t *zio) 3576{ 3577 blkptr_t *bp = zio->io_bp; 3578 enum zio_checksum checksum; 3579 3580 if (bp == NULL) { 3581 /* 3582 * This is zio_write_phys(). 3583 * We're either generating a label checksum, or none at all. 
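 * The checksum comes from io_prop.zp_checksum in this case; once
 * ZIO_CHECKSUM_OFF is filtered out, only ZIO_CHECKSUM_LABEL should
 * remain, as the ASSERT below verifies.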
/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (error == ECKSUM &&
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, zio, zio->io_offset,
			    zio->io_size, NULL, &info);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}

/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
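/*
 * Editor's illustration (not in the original source, compiled out):
 * folding several child errors into one representative error with
 * zio_worst_error().  Unknown errors rank past the end of the table
 * and therefore outrank everything.  The values are examples only.
 */
#if 0
static int
example_fold_errors(void)
{
	int worst = 0;

	worst = zio_worst_error(worst, ENXIO);	/* ENXIO outranks 0 */
	worst = zio_worst_error(worst, ECKSUM);	/* ECKSUM outranks ENXIO */
	worst = zio_worst_error(worst, EIO);	/* EIO outranks ECKSUM */
	worst = zio_worst_error(worst, ENXIO);	/* still EIO */

	return (worst);				/* EIO */
}
#endif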
/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
	    ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error != 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(IO_IS_ALLOCATING(zio));
			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			/*
			 * We were unable to allocate anything, unreserve and
			 * issue the next I/O to allocate.
			 */
			metaslab_class_throttle_unreserve(
			    spa_normal_class(zio->io_spa),
			    zio->io_prop.zp_copies, zio->io_allocator, zio);
			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
		}
	}

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio, &zl);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}
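/*
 * Editor's illustration (not in the original source, compiled out): the
 * parent-notification pattern zio_ready() uses above.  The head of the
 * parent list is snapshotted under io_lock; parents that link on later
 * are prepended to io_parent_list, so the walk deliberately never sees
 * them.  Note how the next parent is fetched before notifying the
 * current one, since notification may unlink entries from the list.
 */
#if 0
static void
example_notify_parents(zio_t *zio)
{
	zio_link_t *zl = NULL;
	zio_t *pio, *pio_next;

	mutex_enter(&zio->io_lock);
	pio = zio_walk_parents(zio, &zl);	/* snapshot the list head */
	mutex_exit(&zio->io_lock);

	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);	/* fetch next first */
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}
}
#endif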
/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *lio = zio->io_logical;
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY)));
	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

	/*
	 * Parents of gang children can have two flavors -- ones that
	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
	 * and ones that allocated the constituent blocks.  The allocation
	 * throttle needs to know the allocating parent zio so we must find
	 * it here.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG) {
		/*
		 * If our parent is a rewrite gang child then our grandparent
		 * would have been the one that performed the allocation.
		 */
		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
			pio = zio_unique_parent(pio);
		flags |= METASLAB_GANG_CHILD;
	}

	ASSERT(IO_IS_ALLOCATING(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);

	mutex_enter(&pio->io_lock);
	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
	    pio->io_allocator, B_TRUE);
	mutex_exit(&pio->io_lock);

	metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa),
	    1, pio->io_allocator, pio);

	/*
	 * Call into the pipeline to see if there is more work that
	 * needs to be done.  If there is work to be done it will be
	 * dispatched to another taskq thread.
	 */
	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
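/*
 * Editor's sketch (not in the original source, compiled out): the
 * reserve/unreserve pairing behind zio_dva_throttle_done().  The
 * reserve side actually lives in the zio_dva_throttle() stage; the
 * arguments below are schematic, and the exact signature of
 * metaslab_class_throttle_reserve() should be checked against
 * metaslab.c before relying on this.
 */
#if 0
static void
example_throttle_pairing(spa_t *spa, zio_t *zio)
{
	metaslab_class_t *mc = spa_normal_class(spa);

	/* Issue side: reserve one slot per copy before allocating. */
	(void) metaslab_class_throttle_reserve(mc, zio->io_prop.zp_copies,
	    zio->io_allocator, zio, 0);

	/* ... the allocation and the physical writes happen here ... */

	/* Completion side: return the slots and kick the next waiter. */
	metaslab_class_throttle_unreserve(mc, zio->io_prop.zp_copies,
	    zio->io_allocator, zio);
	zio_allocate_dispatch(spa, zio->io_allocator);
}
#endif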
static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;
	metaslab_class_t *mc = spa_normal_class(spa);
	zio_link_t *zl = NULL;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If the allocation throttle is enabled, then update the accounting.
	 * We only track child I/Os that are part of an allocating async
	 * write.  We must do this since the allocation is performed
	 * by the logical I/O but the actual write is done by child I/Os.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		ASSERT(mc->mc_alloc_throttle_enabled);
		zio_dva_throttle_done(zio);
	}

	/*
	 * If the allocation throttle is enabled, verify that
	 * we have decremented the refcounts for every I/O that was throttled.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(bp != NULL);
		metaslab_group_alloc_verify(spa, zio->io_bp, zio,
		    zio->io_allocator);
		VERIFY(refcount_not_held(&mc->mc_alloc_slots[zio->io_allocator],
		    zio));
	}

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = NULL;
			abd_t *adata = zio->io_abd;

			if (asize != psize) {
				adata = abd_alloc_linear(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			if (adata != NULL)
				abuf = abd_borrow_buf_copy(adata, asize);

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL)
				abd_return_buf(adata, abuf, asize);

			if (asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
#if defined(illumos) || !defined(_KERNEL)
			ASSERT(zio->io_tqent.tqent_next == NULL);
#else
			ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}
/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
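/*
 * Editor's note (not in the original source, compiled out): the table
 * above is indexed by the position of the one-hot io_stage bit, so its
 * order must match the zio_stage enum.  A dispatch step along the lines
 * of the one in zio_execute() looks like this:
 */
#if 0
static int
example_dispatch_stage(zio_t *zio)
{
	/* io_stage is one-hot; highbit64() gives its 1-based position. */
	return (zio_pipeline[highbit64(zio->io_stage) - 1](zio));
}
#endif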
/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object.  For all
 * other objects, we traverse them in order (object 1 before object 2, and so
 * on).  However, all of these objects are traversed while traversing object 0,
 * since the data it points to is the list of objects.  Thus, we need to
 * convert to a canonical representation so we can compare meta-dnode bookmarks
 * to non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in
 * the blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 1<<31 (any value larger than a level could ever be) for their level.
 * This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);
	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, when the block sizes are
	 * not the same.  There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}
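/*
 * Editor's worked example (not in the original source, compiled out):
 * comparing a meta-dnode bookmark against a plain-object bookmark using
 * the equivalents described above.  Assumes 16K dnode blocks, i.e.
 * dbss = 32 sectors; all numbers are illustrative.
 */
#if 0
static int
example_compare_meta(void)
{
	zbookmark_phys_t meta = { 0 };
	zbookmark_phys_t plain = { 0 };

	/* L0 block 4 of the meta-dnode. */
	meta.zb_object = DMU_META_DNODE_OBJECT;
	meta.zb_level = 0;
	meta.zb_blkid = 4;

	/* L0 block 7 of ordinary object 100. */
	plain.zb_object = 100;
	plain.zb_level = 0;
	plain.zb_blkid = 7;

	/*
	 * With dbss = 32 and 512-byte dnodes, each 16K dnode block holds
	 * 32 dnodes, so the meta bookmark's object equivalent is
	 * 4 * 32 = 128.  128 > 100, so the meta bookmark sorts after the
	 * plain one and the comparison returns 1.  The ibs arguments are
	 * 0 because both bookmarks are level 0, where the span is 1.
	 */
	return (zbookmark_compare(32, 0, 32, 0, &meta, &plain));
}
#endif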
/*
 * This function checks the following: given that last_block is the place that
 * our traversal stopped last time, does that guarantee that we've visited
 * every node under subtree_root?  We can't answer this with the raw output
 * of zbookmark_compare; instead we pass in a modified version of
 * subtree_root: by incrementing the block id, and then checking whether
 * last_block is before or equal to that, we can tell whether or not having
 * visited last_block implies that all of subtree_root's children have been
 * visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode.  Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0.  The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be sure
	 * to verify that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
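/*
 * Editor's sketch (not in the original source, compiled out): how a
 * resumable traversal might consult zbookmark_subtree_completed() to
 * prune work.  'resume' is the saved, level-0 bookmark where the
 * previous pass stopped; the wrapper name is hypothetical.
 */
#if 0
static boolean_t
example_can_prune_subtree(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *resume)
{
	/*
	 * If the previous pass already visited everything at or below
	 * subtree_root, the caller can skip this branch rather than
	 * descend into it again.
	 */
	return (zbookmark_subtree_completed(dnp, subtree_root, resume));
}
#endif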